Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,553 @@
//! Coherence Integration
//!
//! Integrates quantum and topological concepts with Prime-Radiant's coherence framework.
//! Provides measures of structural coherence using topological energy and quantum fidelity.
use super::complex_matrix::{Complex64, ComplexMatrix, ComplexVector};
use super::density_matrix::DensityMatrix;
use super::persistent_homology::{PersistenceDiagram, PersistentHomologyComputer};
use super::quantum_state::QuantumState;
use super::simplicial_complex::SimplicialComplex;
use super::topological_invariant::TopologicalInvariant;
use super::{constants, QuantumTopologyError, Result};
use std::collections::HashMap;
/// Topological energy measure for structural coherence
///
/// Lower energy means a simpler, more coherent topology; a fully trivial
/// structure has zero energy (see [`TopologicalEnergy::zero`]).
#[derive(Debug, Clone)]
pub struct TopologicalEnergy {
    /// Total topological energy (lower = more coherent structure)
    pub total_energy: f64,
    /// Energy contribution from each Betti number, indexed by homology dimension
    pub betti_energies: Vec<f64>,
    /// Persistence-based energy (lifetime-weighted)
    pub persistence_energy: f64,
    /// Euler characteristic contribution (deviation from a single component)
    pub euler_energy: f64,
    /// Topological complexity measure (count of topological features)
    pub complexity: f64,
}
impl TopologicalEnergy {
    /// A structure with no topological features at all — the fully coherent baseline.
    pub fn zero() -> Self {
        Self {
            total_energy: 0.0,
            betti_energies: Vec::new(),
            persistence_energy: 0.0,
            euler_energy: 0.0,
            complexity: 0.0,
        }
    }

    /// Derive energy from static topological invariants.
    pub fn from_invariants(invariants: &TopologicalInvariant) -> Self {
        // Each Betti number contributes energy scaled by (dimension + 1):
        // higher-dimensional features represent deeper structure.
        let betti_energies: Vec<f64> = invariants
            .betti_numbers
            .iter()
            .zip(1..)
            .map(|(&b, weight)| weight as f64 * b as f64)
            .collect();
        let betti_total: f64 = betti_energies.iter().sum();
        // Deviation of the Euler characteristic from 1 (a single connected component).
        let euler_energy = (invariants.euler_characteristic - 1).abs() as f64;
        Self {
            total_energy: betti_total + euler_energy,
            betti_energies,
            // Filled in separately when a persistence diagram is available.
            persistence_energy: 0.0,
            euler_energy,
            complexity: invariants.total_betti() as f64,
        }
    }

    /// Derive energy from a persistence diagram: each finite feature contributes
    /// its lifetime, weighted by (dimension + 1).
    pub fn from_persistence(diagram: &PersistenceDiagram) -> Self {
        let mut betti_energies = vec![0.0; diagram.max_dimension + 1];
        let mut persistence_energy = 0.0;
        // Essential (never-dying) classes are skipped: their persistence is unbounded.
        for pair in diagram.pairs.iter().filter(|p| !p.is_essential()) {
            let lifetime = pair.persistence();
            persistence_energy += (pair.dimension + 1) as f64 * lifetime;
            if let Some(slot) = betti_energies.get_mut(pair.dimension) {
                *slot += lifetime;
            }
        }
        Self {
            total_energy: persistence_energy,
            betti_energies,
            persistence_energy,
            euler_energy: 0.0,
            // Complexity counts every feature, essential ones included.
            complexity: diagram.pairs.len() as f64,
        }
    }

    /// A structure is coherent when its total energy does not exceed `threshold`.
    pub fn is_coherent(&self, threshold: f64) -> bool {
        self.total_energy <= threshold
    }

    /// Map total energy through a logistic sigmoid for a bounded score.
    /// For non-negative energies the result lies in [0.5, 1), approaching 1
    /// as energy grows.
    pub fn normalized(&self) -> f64 {
        1.0 / (1.0 + (-self.total_energy).exp())
    }
}
/// Quantum coherence metric between states
///
/// Bundles several standard distinguishability measures computed for the same
/// pair of states; see the constructors for the pure-state and mixed-state cases.
#[derive(Debug, Clone)]
pub struct QuantumCoherenceMetric {
    /// Fidelity between states (1 = identical)
    pub fidelity: f64,
    /// Trace distance (0 = identical)
    pub trace_distance: f64,
    /// Relative entropy (0 = identical; may be infinite for orthogonal pure states)
    pub relative_entropy: f64,
    /// Purity of state 1 (always 1.0 when built from pure states)
    pub purity_1: f64,
    /// Purity of state 2 (always 1.0 when built from pure states)
    pub purity_2: f64,
}
impl QuantumCoherenceMetric {
    /// Build the metric for two pure states.
    ///
    /// Pure states have purity exactly 1, so both purity fields are fixed at 1.0.
    /// Errors propagate from the underlying fidelity computation.
    pub fn from_pure_states(state1: &QuantumState, state2: &QuantumState) -> Result<Self> {
        let fidelity = state1.fidelity(state2)?;
        // For pure states the trace distance reduces to sqrt(1 - F).
        let trace_distance = (1.0 - fidelity).sqrt();
        // Relative entropy diverges for (near-)orthogonal pure states.
        let relative_entropy = match fidelity > constants::EPSILON {
            true => -fidelity.ln(),
            false => f64::INFINITY,
        };
        Ok(Self {
            fidelity,
            trace_distance,
            relative_entropy,
            purity_1: 1.0,
            purity_2: 1.0,
        })
    }

    /// Build the metric for two (possibly mixed) density matrices.
    pub fn from_density_matrices(rho1: &DensityMatrix, rho2: &DensityMatrix) -> Result<Self> {
        Ok(Self {
            fidelity: rho1.fidelity(rho2)?,
            trace_distance: rho1.trace_distance(rho2)?,
            relative_entropy: rho1.relative_entropy(rho2)?,
            purity_1: rho1.purity(),
            purity_2: rho2.purity(),
        })
    }

    /// States count as coherent (similar) when fidelity reaches the threshold.
    pub fn is_coherent(&self, fidelity_threshold: f64) -> bool {
        self.fidelity >= fidelity_threshold
    }

    /// Overall coherence score (0 = incoherent, 1 = fully coherent):
    /// 70% fidelity blended with 30% complemented, clamped trace distance.
    pub fn coherence_score(&self) -> f64 {
        let similarity = 1.0 - self.trace_distance.min(1.0);
        0.7 * self.fidelity + 0.3 * similarity
    }
}
/// Quantum fidelity between two states (pure state case)
///
/// Thin wrapper over `QuantumState::fidelity`; the exact convention
/// (amplitude overlap vs. squared overlap) is whatever that method
/// implements — confirm against `quantum_state` before relying on it.
pub fn quantum_fidelity(state1: &QuantumState, state2: &QuantumState) -> Result<f64> {
    state1.fidelity(state2)
}

/// Quantum trace distance between two density matrices
///
/// Thin wrapper over `DensityMatrix::trace_distance`.
pub fn quantum_trace_distance(rho1: &DensityMatrix, rho2: &DensityMatrix) -> Result<f64> {
    rho1.trace_distance(rho2)
}
/// Analyzer for topological coherence in belief graphs
pub struct TopologicalCoherenceAnalyzer {
    /// Maximum dimension for homology computation
    max_dimension: usize,
    /// Persistence threshold for significant features
    persistence_threshold: f64,
    /// Coherence threshold (energy at or below this counts as coherent)
    coherence_threshold: f64,
}
impl TopologicalCoherenceAnalyzer {
    /// Create a new analyzer.
    ///
    /// * `max_dimension` - highest homology dimension to compute (e.g. 2 covers
    ///   components, loops, and voids)
    /// * `persistence_threshold` - minimum lifetime for a persistence feature
    ///   to be considered significant
    /// * `coherence_threshold` - energy at or below which a structure counts
    ///   as coherent
    pub fn new(max_dimension: usize, persistence_threshold: f64, coherence_threshold: f64) -> Self {
        Self {
            max_dimension,
            persistence_threshold,
            coherence_threshold,
        }
    }

    /// Compute topological energy from belief graph structure
    ///
    /// The belief graph is represented as:
    /// - vertices: belief nodes (as points in embedding space)
    /// - edges: connections between beliefs
    ///
    /// Returns zero energy for an empty graph. The total energy combines the
    /// invariant-based energy, the persistence energy of significant features,
    /// and the dispersion of the edge weights.
    pub fn analyze_belief_graph(
        &self,
        node_embeddings: &[Vec<f64>],
        edges: &[(usize, usize)],
        edge_weights: &[f64],
    ) -> TopologicalEnergy {
        if node_embeddings.is_empty() {
            return TopologicalEnergy::zero();
        }
        // Build simplicial complex from graph
        let complex = self.graph_to_complex(node_embeddings.len(), edges);
        // Compute topological invariants
        let invariants = TopologicalInvariant::from_complex(&complex);
        // Compute base energy from invariants
        let mut energy = TopologicalEnergy::from_invariants(&invariants);
        // Compute persistence for finer analysis. (The embeddings are known to
        // be non-empty here — checked above — so no further guard is needed.)
        let ph = PersistentHomologyComputer::new(self.max_dimension);
        let max_dist = self.estimate_max_distance(node_embeddings);
        let diagram = ph.compute_from_points(node_embeddings, max_dist);
        // Keep only features that outlive the persistence threshold.
        let filtered = diagram.filter_by_persistence(self.persistence_threshold);
        let persistence_energy = TopologicalEnergy::from_persistence(&filtered);
        energy.persistence_energy = persistence_energy.persistence_energy;
        energy.total_energy += persistence_energy.persistence_energy;
        // Add weighted edge contribution
        let edge_energy = self.compute_edge_energy(edges, edge_weights);
        energy.total_energy += edge_energy;
        energy
    }

    /// Convert graph to simplicial complex: vertices, edges, and (when
    /// `max_dimension >= 2`) triangles for 3-cliques. Edges with out-of-range
    /// endpoints are silently skipped.
    fn graph_to_complex(&self, num_vertices: usize, edges: &[(usize, usize)]) -> SimplicialComplex {
        use super::simplicial_complex::Simplex;
        let mut complex = SimplicialComplex::new();
        // Add vertices
        for i in 0..num_vertices {
            complex.add_simplex(Simplex::vertex(i));
        }
        // Add edges
        for &(i, j) in edges {
            if i < num_vertices && j < num_vertices {
                complex.add_simplex(Simplex::edge(i, j));
            }
        }
        // Optionally add triangles for cliques (higher coherence)
        if self.max_dimension >= 2 {
            self.add_triangles(&mut complex, num_vertices, edges);
        }
        complex
    }

    /// Add triangles (2-simplices) for graph 3-cliques.
    fn add_triangles(
        &self,
        complex: &mut SimplicialComplex,
        num_vertices: usize,
        edges: &[(usize, usize)],
    ) {
        use super::simplicial_complex::Simplex;
        use std::collections::HashSet;
        // Build adjacency set
        let mut adj: Vec<HashSet<usize>> = vec![HashSet::new(); num_vertices];
        for &(i, j) in edges {
            if i < num_vertices && j < num_vertices {
                adj[i].insert(j);
                adj[j].insert(i);
            }
        }
        // For each edge (i, j), every common neighbor k closes a triangle.
        for &(i, j) in edges {
            if i >= num_vertices || j >= num_vertices {
                continue;
            }
            for &k in adj[i].iter() {
                // `k > j` reduces duplicate discovery; NOTE(review): a triangle
                // may still be visited from more than one of its edges, so this
                // relies on `add_simplex` de-duplicating — confirm.
                if k > j && adj[j].contains(&k) {
                    complex.add_simplex(Simplex::triangle(i, j, k));
                }
            }
        }
    }

    /// Estimate maximum pairwise distance for the filtration, sampling at most
    /// the first 100 points to bound the cost. The result is clamped to >= 1.0.
    fn estimate_max_distance(&self, points: &[Vec<f64>]) -> f64 {
        if points.len() < 2 {
            return 1.0;
        }
        // Sample some distances
        let mut max_dist = 0.0_f64;
        let sample_size = points.len().min(100);
        for i in 0..sample_size {
            for j in (i + 1)..sample_size {
                let dist = euclidean_distance(&points[i], &points[j]);
                max_dist = max_dist.max(dist);
            }
        }
        max_dist.max(1.0)
    }

    /// Energy from edge weights: the standard deviation of the weights.
    /// High dispersion among edge weights indicates inconsistency.
    fn compute_edge_energy(&self, edges: &[(usize, usize)], weights: &[f64]) -> f64 {
        if edges.is_empty() || weights.is_empty() {
            return 0.0;
        }
        let mean: f64 = weights.iter().sum::<f64>() / weights.len() as f64;
        let variance: f64 =
            weights.iter().map(|w| (w - mean).powi(2)).sum::<f64>() / weights.len() as f64;
        variance.sqrt()
    }

    /// Analyze coherence evolution over a time-ordered series of snapshots.
    ///
    /// Trend is (mean of second half) - (mean of first half); volatility is
    /// the mean absolute step-to-step change.
    pub fn analyze_temporal_coherence(
        &self,
        snapshots: &[TopologicalEnergy],
    ) -> CoherenceEvolution {
        if snapshots.is_empty() {
            return CoherenceEvolution::empty();
        }
        let energies: Vec<f64> = snapshots.iter().map(|e| e.total_energy).collect();
        let n = energies.len();
        let mean_energy = energies.iter().sum::<f64>() / n as f64;
        // Trend requires n > 1 so that both halves are non-empty.
        let trend = if n > 1 {
            let first_half: f64 = energies[..n / 2].iter().sum::<f64>() / (n / 2) as f64;
            let second_half: f64 = energies[n / 2..].iter().sum::<f64>() / (n - n / 2) as f64;
            second_half - first_half
        } else {
            0.0
        };
        // Volatility: mean absolute first difference.
        let volatility = if n > 1 {
            energies
                .windows(2)
                .map(|w| (w[1] - w[0]).abs())
                .sum::<f64>()
                / (n - 1) as f64
        } else {
            0.0
        };
        CoherenceEvolution {
            mean_energy,
            trend,
            volatility,
            max_energy: energies.iter().cloned().fold(f64::NEG_INFINITY, f64::max),
            min_energy: energies.iter().cloned().fold(f64::INFINITY, f64::min),
            is_stable: volatility < self.coherence_threshold / 10.0,
            is_improving: trend < 0.0,
        }
    }

    /// Check coherence against this analyzer's configured threshold.
    pub fn is_coherent(&self, energy: &TopologicalEnergy) -> bool {
        energy.is_coherent(self.coherence_threshold)
    }
}

/// Default parameters: homology up to dimension 2, persistence threshold 0.1,
/// coherence threshold 1.0.
///
/// Implemented as the `Default` trait (rather than an inherent `default`
/// method, which clippy flags as `should_implement_trait`); existing
/// `TopologicalCoherenceAnalyzer::default()` call sites resolve via the
/// prelude, and `..Default::default()` now works too.
impl Default for TopologicalCoherenceAnalyzer {
    fn default() -> Self {
        Self::new(2, 0.1, 1.0)
    }
}
/// Evolution of coherence over time
#[derive(Debug, Clone)]
pub struct CoherenceEvolution {
    /// Mean energy over time
    pub mean_energy: f64,
    /// Energy trend (positive = worsening, negative = improving)
    pub trend: f64,
    /// Energy volatility (mean absolute step-to-step change)
    pub volatility: f64,
    /// Maximum energy observed
    pub max_energy: f64,
    /// Minimum energy observed
    pub min_energy: f64,
    /// Is the system stable? (volatility below a fraction of the threshold)
    pub is_stable: bool,
    /// Is the coherence improving? (strictly negative trend)
    pub is_improving: bool,
}
impl CoherenceEvolution {
    /// Evolution summary for an empty snapshot series: all statistics zero,
    /// considered stable and not improving.
    pub fn empty() -> Self {
        Self {
            is_stable: true,
            is_improving: false,
            mean_energy: 0.0,
            trend: 0.0,
            volatility: 0.0,
            max_energy: 0.0,
            min_energy: 0.0,
        }
    }
}
/// Euclidean (L2) distance between two equal-length points.
///
/// `zip` silently ignores extra coordinates in the longer slice; a length
/// mismatch almost certainly indicates a caller bug, so it is flagged in
/// debug builds while release behavior is unchanged.
fn euclidean_distance(a: &[f64], b: &[f64]) -> f64 {
    debug_assert_eq!(a.len(), b.len(), "point dimension mismatch");
    a.iter()
        .zip(b)
        .map(|(x, y)| (x - y).powi(2))
        .sum::<f64>()
        .sqrt()
}
/// Compute topological coherence energy for a belief graph
///
/// This is the main entry point for integration with Prime-Radiant's coherence engine.
/// Builds a `TopologicalCoherenceAnalyzer` with default parameters and
/// delegates to `analyze_belief_graph`.
pub fn topological_coherence_energy(
    node_embeddings: &[Vec<f64>],
    edges: &[(usize, usize)],
    edge_weights: &[f64],
) -> TopologicalEnergy {
    let analyzer = TopologicalCoherenceAnalyzer::default();
    analyzer.analyze_belief_graph(node_embeddings, edges, edge_weights)
}
/// Quantum coherence metric between two states
///
/// Returns the fidelity (overlap) between two quantum states.
/// NOTE(review): despite the name, this returns the raw fidelity as `f64`,
/// not a `QuantumCoherenceMetric` struct.
pub fn quantum_coherence_metric(state: &QuantumState, reference: &QuantumState) -> Result<f64> {
    state.fidelity(reference)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Zero energy is trivially below any positive threshold.
    #[test]
    fn test_topological_energy_zero() {
        let energy = TopologicalEnergy::zero();
        assert!(energy.is_coherent(0.1));
        assert_eq!(energy.total_energy, 0.0);
    }

    #[test]
    fn test_topological_energy_from_invariants() {
        let invariants = TopologicalInvariant::from_betti(vec![1, 0, 0]);
        let energy = TopologicalEnergy::from_invariants(&invariants);
        // Single connected component: β_0 = 1, χ = 1
        assert!((energy.euler_energy - 0.0).abs() < 1e-10);
    }

    // |0⟩ vs. the uniform superposition: partial overlap, so the fidelity is
    // strictly between the extremes and the states are distinguishable.
    #[test]
    fn test_quantum_coherence_metric() {
        let state1 = QuantumState::ground_state(1);
        let state2 = QuantumState::uniform_superposition(1);
        let metric = QuantumCoherenceMetric::from_pure_states(&state1, &state2).unwrap();
        assert!(metric.fidelity > 0.0 && metric.fidelity < 1.0);
        assert!(metric.trace_distance > 0.0);
    }

    #[test]
    fn test_topological_coherence_analyzer() {
        let analyzer = TopologicalCoherenceAnalyzer::default();
        // Simple triangle graph
        let embeddings = vec![
            vec![0.0, 0.0],
            vec![1.0, 0.0],
            vec![0.5, 0.866],
        ];
        let edges = vec![(0, 1), (1, 2), (0, 2)];
        let weights = vec![1.0, 1.0, 1.0];
        let energy = analyzer.analyze_belief_graph(&embeddings, &edges, &weights);
        // Triangle should have low energy (coherent structure)
        assert!(energy.total_energy.is_finite());
    }

    // A monotonically decreasing energy series must report improvement.
    #[test]
    fn test_temporal_coherence() {
        let analyzer = TopologicalCoherenceAnalyzer::default();
        let snapshots = vec![
            TopologicalEnergy { total_energy: 1.0, ..TopologicalEnergy::zero() },
            TopologicalEnergy { total_energy: 0.9, ..TopologicalEnergy::zero() },
            TopologicalEnergy { total_energy: 0.8, ..TopologicalEnergy::zero() },
            TopologicalEnergy { total_energy: 0.7, ..TopologicalEnergy::zero() },
        ];
        let evolution = analyzer.analyze_temporal_coherence(&snapshots);
        assert!(evolution.is_improving); // Energy decreasing
        assert!(evolution.trend < 0.0);
    }

    #[test]
    fn test_coherence_entry_points() {
        // Test main entry points
        let embeddings = vec![vec![0.0], vec![1.0]];
        let edges = vec![(0, 1)];
        let weights = vec![1.0];
        let energy = topological_coherence_energy(&embeddings, &edges, &weights);
        assert!(energy.total_energy.is_finite());
        let state = QuantumState::ground_state(1);
        let reference = QuantumState::uniform_superposition(1);
        let metric = quantum_coherence_metric(&state, &reference).unwrap();
        assert!(metric >= 0.0 && metric <= 1.0);
    }
}

View File

@@ -0,0 +1,990 @@
//! Complex Matrix and Vector Operations
//!
//! Provides fundamental complex linear algebra operations for quantum computing.
//! Uses `num-complex` for complex number arithmetic with f64 precision.
use std::ops::{Add, Mul, Sub};
use ruvector_solver::types::CsrMatrix;
/// Complex number type alias using f64 precision
/// (re-exported so downstream modules need not depend on `num_complex` directly)
pub type Complex64 = num_complex::Complex<f64>;
/// Complex vector type
///
/// Stores amplitudes densely; used both as a quantum state vector and as a
/// general complex column vector.
#[derive(Debug, Clone, PartialEq)]
pub struct ComplexVector {
    /// Vector elements
    pub data: Vec<Complex64>,
}
impl ComplexVector {
    /// Wrap a vector of complex components.
    pub fn new(data: Vec<Complex64>) -> Self {
        Self { data }
    }

    /// All-zero vector of dimension `dim`.
    pub fn zeros(dim: usize) -> Self {
        Self {
            data: vec![Complex64::new(0.0, 0.0); dim],
        }
    }

    /// Complex vector whose entries are the given reals (zero imaginary parts).
    pub fn from_real(reals: &[f64]) -> Self {
        let data = reals.iter().map(|&r| Complex64::new(r, 0.0)).collect();
        Self { data }
    }

    /// Computational basis state |index⟩.
    /// An out-of-range index yields the all-zero vector.
    pub fn basis_state(dim: usize, index: usize) -> Self {
        let mut v = Self::zeros(dim);
        if let Some(slot) = v.data.get_mut(index) {
            *slot = Complex64::new(1.0, 0.0);
        }
        v
    }

    /// Number of components.
    pub fn dim(&self) -> usize {
        self.data.len()
    }

    /// L2 (Euclidean) norm.
    pub fn norm(&self) -> f64 {
        self.norm_squared().sqrt()
    }

    /// Sum of squared component magnitudes.
    pub fn norm_squared(&self) -> f64 {
        self.data.iter().map(|c| c.norm_sqr()).sum()
    }

    /// Normalize in place. Vectors with norm ≤ 1e-15 are left untouched to
    /// avoid dividing by (near-)zero.
    pub fn normalize(&mut self) {
        let n = self.norm();
        if n > 1e-15 {
            self.data.iter_mut().for_each(|c| *c /= n);
        }
    }

    /// Normalized copy of this vector.
    pub fn normalized(&self) -> Self {
        let mut out = self.clone();
        out.normalize();
        out
    }

    /// Inner product ⟨self|other⟩ = self† · other (conjugate-linear in `self`).
    ///
    /// # Panics
    /// Panics when dimensions differ.
    pub fn inner(&self, other: &ComplexVector) -> Complex64 {
        assert_eq!(self.dim(), other.dim(), "Dimension mismatch in inner product");
        let mut acc = Complex64::new(0.0, 0.0);
        for (a, b) in self.data.iter().zip(&other.data) {
            acc += a.conj() * b;
        }
        acc
    }

    /// Outer product |self⟩⟨other|: an (m × n) matrix with entries
    /// self[i] · conj(other[j]).
    pub fn outer(&self, other: &ComplexVector) -> ComplexMatrix {
        let (rows, cols) = (self.dim(), other.dim());
        let mut data = Vec::with_capacity(rows * cols);
        // Row-major fill matches ComplexMatrix's storage layout.
        for a in &self.data {
            for b in &other.data {
                data.push(a * b.conj());
            }
        }
        ComplexMatrix { data, rows, cols }
    }

    /// Tensor product |self⟩ ⊗ |other⟩.
    pub fn tensor(&self, other: &ComplexVector) -> Self {
        let data = self
            .data
            .iter()
            .flat_map(|a| other.data.iter().map(move |b| a * b))
            .collect();
        Self { data }
    }

    /// Component-wise complex conjugate.
    pub fn conjugate(&self) -> Self {
        Self {
            data: self.data.iter().map(|c| c.conj()).collect(),
        }
    }

    /// Multiply every component by `factor`.
    pub fn scale(&self, factor: Complex64) -> Self {
        Self {
            data: self.data.iter().map(|c| c * factor).collect(),
        }
    }

    /// Component-wise sum.
    ///
    /// # Panics
    /// Panics when dimensions differ.
    pub fn add(&self, other: &ComplexVector) -> Self {
        assert_eq!(self.dim(), other.dim(), "Dimension mismatch in addition");
        let data = self
            .data
            .iter()
            .zip(&other.data)
            .map(|(a, b)| a + b)
            .collect();
        Self { data }
    }

    /// Component-wise difference.
    ///
    /// # Panics
    /// Panics when dimensions differ.
    pub fn sub(&self, other: &ComplexVector) -> Self {
        assert_eq!(self.dim(), other.dim(), "Dimension mismatch in subtraction");
        let data = self
            .data
            .iter()
            .zip(&other.data)
            .map(|(a, b)| a - b)
            .collect();
        Self { data }
    }
}
/// `&v1 + &v2` — component-wise sum; panics on dimension mismatch
/// (delegates to `ComplexVector::add`).
impl Add for &ComplexVector {
    type Output = ComplexVector;
    fn add(self, other: &ComplexVector) -> ComplexVector {
        ComplexVector::add(self, other)
    }
}

/// `&v1 - &v2` — component-wise difference; panics on dimension mismatch
/// (delegates to `ComplexVector::sub`).
impl Sub for &ComplexVector {
    type Output = ComplexVector;
    fn sub(self, other: &ComplexVector) -> ComplexVector {
        ComplexVector::sub(self, other)
    }
}
/// Complex matrix for quantum operations
///
/// Dense, row-major storage: element (i, j) lives at `data[i * cols + j]`.
#[derive(Debug, Clone, PartialEq)]
pub struct ComplexMatrix {
    /// Row-major data storage
    pub data: Vec<Complex64>,
    /// Number of rows
    pub rows: usize,
    /// Number of columns
    pub cols: usize,
}
impl ComplexMatrix {
    /// Create a new matrix from row-major data
    ///
    /// # Panics
    /// Panics if `data.len() != rows * cols`.
    pub fn new(data: Vec<Complex64>, rows: usize, cols: usize) -> Self {
        assert_eq!(data.len(), rows * cols, "Data length must match dimensions");
        Self { data, rows, cols }
    }

    /// Create a zero matrix
    pub fn zeros(rows: usize, cols: usize) -> Self {
        Self {
            data: vec![Complex64::new(0.0, 0.0); rows * cols],
            rows,
            cols,
        }
    }

    /// Create an identity matrix
    pub fn identity(n: usize) -> Self {
        let mut data = vec![Complex64::new(0.0, 0.0); n * n];
        for i in 0..n {
            data[i * n + i] = Complex64::new(1.0, 0.0);
        }
        Self {
            data,
            rows: n,
            cols: n,
        }
    }

    /// Create a matrix from real values (imaginary parts set to zero)
    ///
    /// # Panics
    /// Panics if `reals.len() != rows * cols`.
    pub fn from_real(reals: &[f64], rows: usize, cols: usize) -> Self {
        assert_eq!(reals.len(), rows * cols, "Data length must match dimensions");
        Self {
            data: reals.iter().map(|&r| Complex64::new(r, 0.0)).collect(),
            rows,
            cols,
        }
    }

    /// Create a diagonal matrix from a vector
    pub fn diagonal(diag: &[Complex64]) -> Self {
        let n = diag.len();
        let mut data = vec![Complex64::new(0.0, 0.0); n * n];
        for (i, &val) in diag.iter().enumerate() {
            data[i * n + i] = val;
        }
        Self {
            data,
            rows: n,
            cols: n,
        }
    }

    /// Get element at (row, col)
    ///
    /// # Panics
    /// Panics on out-of-bounds indices.
    pub fn get(&self, row: usize, col: usize) -> Complex64 {
        assert!(row < self.rows && col < self.cols, "Index out of bounds");
        self.data[row * self.cols + col]
    }

    /// Set element at (row, col)
    ///
    /// # Panics
    /// Panics on out-of-bounds indices.
    pub fn set(&mut self, row: usize, col: usize, value: Complex64) {
        assert!(row < self.rows && col < self.cols, "Index out of bounds");
        self.data[row * self.cols + col] = value;
    }

    /// Check if matrix is square
    pub fn is_square(&self) -> bool {
        self.rows == self.cols
    }

    /// Compute the trace (sum of diagonal elements)
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    pub fn trace(&self) -> Complex64 {
        assert!(self.is_square(), "Trace requires square matrix");
        (0..self.rows).map(|i| self.get(i, i)).sum()
    }

    /// Compute the conjugate transpose (Hermitian adjoint) A†
    pub fn adjoint(&self) -> Self {
        let mut data = vec![Complex64::new(0.0, 0.0); self.rows * self.cols];
        for i in 0..self.rows {
            for j in 0..self.cols {
                // Element (i, j) lands at (j, i) of the result, conjugated; the
                // result has `self.rows` columns, hence `j * self.rows + i`.
                data[j * self.rows + i] = self.get(i, j).conj();
            }
        }
        Self {
            data,
            rows: self.cols,
            cols: self.rows,
        }
    }

    /// Compute the transpose
    pub fn transpose(&self) -> Self {
        let mut data = vec![Complex64::new(0.0, 0.0); self.rows * self.cols];
        for i in 0..self.rows {
            for j in 0..self.cols {
                data[j * self.rows + i] = self.get(i, j);
            }
        }
        Self {
            data,
            rows: self.cols,
            cols: self.rows,
        }
    }

    /// Check if matrix is Hermitian (A = A†) within `tolerance`
    pub fn is_hermitian(&self, tolerance: f64) -> bool {
        if !self.is_square() {
            return false;
        }
        // Only the lower triangle (j <= i) needs checking; the upper triangle
        // follows by conjugate symmetry.
        for i in 0..self.rows {
            for j in 0..=i {
                let diff = (self.get(i, j) - self.get(j, i).conj()).norm();
                if diff > tolerance {
                    return false;
                }
            }
        }
        true
    }

    /// Check if all entries have negligible imaginary parts
    pub fn is_real_valued(&self, tolerance: f64) -> bool {
        self.data.iter().all(|c| c.im.abs() <= tolerance)
    }

    /// Convert a real-valued ComplexMatrix to CsrMatrix<f64>.
    /// Returns None if any entry has a significant imaginary part.
    ///
    /// Note: `tolerance` is used both to reject imaginary parts and to drop
    /// near-zero real entries from the sparse representation.
    pub fn to_csr_real(&self, tolerance: f64) -> Option<CsrMatrix<f64>> {
        if !self.is_real_valued(tolerance) {
            return None;
        }
        let entries: Vec<(usize, usize, f64)> = (0..self.rows)
            .flat_map(|i| {
                (0..self.cols)
                    .filter_map(move |j| {
                        let val = self.get(i, j).re;
                        if val.abs() > tolerance {
                            Some((i, j, val))
                        } else {
                            None
                        }
                    })
            })
            .collect();
        Some(CsrMatrix::<f64>::from_coo(self.rows, self.cols, entries))
    }

    /// Check if matrix is unitary (A†A = I) within `tolerance`
    pub fn is_unitary(&self, tolerance: f64) -> bool {
        if !self.is_square() {
            return false;
        }
        let product = self.adjoint().matmul(self);
        let identity = ComplexMatrix::identity(self.rows);
        for i in 0..self.rows {
            for j in 0..self.cols {
                let diff = (product.get(i, j) - identity.get(i, j)).norm();
                if diff > tolerance {
                    return false;
                }
            }
        }
        true
    }

    /// Matrix-vector multiplication
    ///
    /// # Panics
    /// Panics if `self.cols != v.dim()`.
    pub fn matvec(&self, v: &ComplexVector) -> ComplexVector {
        assert_eq!(self.cols, v.dim(), "Dimension mismatch in matrix-vector product");
        let mut result = Vec::with_capacity(self.rows);
        for i in 0..self.rows {
            let mut sum = Complex64::new(0.0, 0.0);
            for j in 0..self.cols {
                sum += self.get(i, j) * v.data[j];
            }
            result.push(sum);
        }
        ComplexVector::new(result)
    }

    /// Matrix-matrix multiplication (naive triple loop, O(n^3))
    ///
    /// # Panics
    /// Panics if `self.cols != other.rows`.
    pub fn matmul(&self, other: &ComplexMatrix) -> Self {
        assert_eq!(
            self.cols, other.rows,
            "Dimension mismatch in matrix multiplication"
        );
        let mut data = vec![Complex64::new(0.0, 0.0); self.rows * other.cols];
        for i in 0..self.rows {
            for j in 0..other.cols {
                let mut sum = Complex64::new(0.0, 0.0);
                for k in 0..self.cols {
                    sum += self.get(i, k) * other.get(k, j);
                }
                data[i * other.cols + j] = sum;
            }
        }
        Self {
            data,
            rows: self.rows,
            cols: other.cols,
        }
    }

    /// Scale the matrix by a complex factor
    pub fn scale(&self, factor: Complex64) -> Self {
        Self {
            data: self.data.iter().map(|c| c * factor).collect(),
            rows: self.rows,
            cols: self.cols,
        }
    }

    /// Add two matrices element-wise
    ///
    /// # Panics
    /// Panics on shape mismatch.
    pub fn add(&self, other: &ComplexMatrix) -> Self {
        assert_eq!(
            (self.rows, self.cols),
            (other.rows, other.cols),
            "Dimension mismatch in matrix addition"
        );
        Self {
            data: self
                .data
                .iter()
                .zip(other.data.iter())
                .map(|(a, b)| a + b)
                .collect(),
            rows: self.rows,
            cols: self.cols,
        }
    }

    /// Subtract two matrices element-wise
    ///
    /// # Panics
    /// Panics on shape mismatch.
    pub fn sub(&self, other: &ComplexMatrix) -> Self {
        assert_eq!(
            (self.rows, self.cols),
            (other.rows, other.cols),
            "Dimension mismatch in matrix subtraction"
        );
        Self {
            data: self
                .data
                .iter()
                .zip(other.data.iter())
                .map(|(a, b)| a - b)
                .collect(),
            rows: self.rows,
            cols: self.cols,
        }
    }

    /// Tensor (Kronecker) product A ⊗ B
    pub fn tensor(&self, other: &ComplexMatrix) -> Self {
        let new_rows = self.rows * other.rows;
        let new_cols = self.cols * other.cols;
        let mut data = vec![Complex64::new(0.0, 0.0); new_rows * new_cols];
        for i in 0..self.rows {
            for j in 0..self.cols {
                let a_ij = self.get(i, j);
                // Block (i, j) of the result is a_ij * B.
                for k in 0..other.rows {
                    for l in 0..other.cols {
                        let row = i * other.rows + k;
                        let col = j * other.cols + l;
                        data[row * new_cols + col] = a_ij * other.get(k, l);
                    }
                }
            }
        }
        Self {
            data,
            rows: new_rows,
            cols: new_cols,
        }
    }

    /// Compute the Frobenius norm ||A||_F = sqrt(Tr(A†A))
    pub fn frobenius_norm(&self) -> f64 {
        self.data.iter().map(|c| c.norm_sqr()).sum::<f64>().sqrt()
    }

    /// Compute partial trace over subsystem B for a bipartite system AB
    /// Assumes dimensions: total = dim_a * dim_b, traces out subsystem B.
    /// The composite index layout is `a_index * dim_b + b_index`.
    ///
    /// # Panics
    /// Panics if the matrix is not square or `rows != dim_a * dim_b`.
    pub fn partial_trace_b(&self, dim_a: usize, dim_b: usize) -> Self {
        assert!(self.is_square(), "Partial trace requires square matrix");
        assert_eq!(self.rows, dim_a * dim_b, "Dimensions must match");
        let mut result = Self::zeros(dim_a, dim_a);
        for i in 0..dim_a {
            for j in 0..dim_a {
                let mut sum = Complex64::new(0.0, 0.0);
                // Sum over the diagonal of subsystem B.
                for k in 0..dim_b {
                    let row = i * dim_b + k;
                    let col = j * dim_b + k;
                    sum += self.get(row, col);
                }
                result.set(i, j, sum);
            }
        }
        result
    }

    /// Compute partial trace over subsystem A for a bipartite system AB
    ///
    /// # Panics
    /// Panics if the matrix is not square or `rows != dim_a * dim_b`.
    pub fn partial_trace_a(&self, dim_a: usize, dim_b: usize) -> Self {
        assert!(self.is_square(), "Partial trace requires square matrix");
        assert_eq!(self.rows, dim_a * dim_b, "Dimensions must match");
        let mut result = Self::zeros(dim_b, dim_b);
        for i in 0..dim_b {
            for j in 0..dim_b {
                let mut sum = Complex64::new(0.0, 0.0);
                // Sum over the diagonal of subsystem A.
                for k in 0..dim_a {
                    let row = k * dim_b + i;
                    let col = k * dim_b + j;
                    sum += self.get(row, col);
                }
                result.set(i, j, sum);
            }
        }
        result
    }

    /// Compute eigenvalues of a real symmetric matrix using the classical
    /// (dense, in-place) Jacobi method.
    ///
    /// This is significantly more numerically stable than power iteration + deflation,
    /// especially for matrices with clustered eigenvalues (common in density matrices).
    /// Only the real parts of the entries are used; the caller is expected to
    /// have verified the matrix is (approximately) real symmetric.
    fn eigenvalues_real_symmetric(&self, max_iterations: usize, tolerance: f64) -> Vec<Complex64> {
        let n = self.rows;
        // Work with a real copy (row-major f64)
        let mut a: Vec<Vec<f64>> = (0..n)
            .map(|i| (0..n).map(|j| self.get(i, j).re).collect())
            .collect();
        // Jacobi eigenvalue algorithm: repeatedly zero off-diagonal elements
        for _ in 0..max_iterations * n {
            // Find largest off-diagonal element
            let mut max_val = 0.0f64;
            let mut p = 0;
            let mut q = 1;
            for i in 0..n {
                for j in (i + 1)..n {
                    if a[i][j].abs() > max_val {
                        max_val = a[i][j].abs();
                        p = i;
                        q = j;
                    }
                }
            }
            // Check convergence
            if max_val < tolerance {
                break;
            }
            // Compute Jacobi rotation angle
            let theta = if (a[p][p] - a[q][q]).abs() < 1e-15 {
                // Near-degenerate diagonal entries: rotate by π/4 to avoid
                // dividing by (near-)zero.
                std::f64::consts::FRAC_PI_4
            } else {
                0.5 * (2.0 * a[p][q] / (a[p][p] - a[q][q])).atan()
            };
            let c = theta.cos();
            let s = theta.sin();
            // Apply Jacobi rotation: A' = J^T A J
            // Update rows p and q
            let mut new_row_p = vec![0.0; n];
            let mut new_row_q = vec![0.0; n];
            for j in 0..n {
                new_row_p[j] = c * a[p][j] + s * a[q][j];
                new_row_q[j] = -s * a[p][j] + c * a[q][j];
            }
            for j in 0..n {
                a[p][j] = new_row_p[j];
                a[q][j] = new_row_q[j];
            }
            // Update columns p and q
            for i in 0..n {
                let aip = a[i][p];
                let aiq = a[i][q];
                a[i][p] = c * aip + s * aiq;
                a[i][q] = -s * aip + c * aiq;
            }
        }
        // Eigenvalues are on the diagonal
        let mut eigenvalues: Vec<Complex64> = (0..n)
            .map(|i| Complex64::new(a[i][i], 0.0))
            .collect();
        // Sort by magnitude (descending)
        eigenvalues.sort_by(|a, b| b.norm().partial_cmp(&a.norm()).unwrap_or(std::cmp::Ordering::Equal));
        eigenvalues
    }

    /// Compute eigenvalues using QR iteration (simplified version)
    /// Returns eigenvalues in descending order of magnitude
    ///
    /// Strategy by size/shape:
    /// - 0x0 / 1x1: trivial
    /// - 2x2: closed form via the characteristic polynomial
    /// - real symmetric (within `tolerance`): Jacobi method
    /// - otherwise: power iteration with rank-one deflation (approximate)
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    pub fn eigenvalues(&self, max_iterations: usize, tolerance: f64) -> Vec<Complex64> {
        assert!(self.is_square(), "Eigenvalues require square matrix");
        let n = self.rows;
        if n == 0 {
            return vec![];
        }
        if n == 1 {
            return vec![self.get(0, 0)];
        }
        // For 2x2 matrices, use closed-form solution
        if n == 2 {
            let a = self.get(0, 0);
            let b = self.get(0, 1);
            let c = self.get(1, 0);
            let d = self.get(1, 1);
            let trace = a + d;
            let det = a * d - b * c;
            // λ = (tr ± sqrt(tr² - 4 det)) / 2
            let discriminant = trace * trace - Complex64::new(4.0, 0.0) * det;
            let sqrt_disc = discriminant.sqrt();
            let lambda1 = (trace + sqrt_disc) / Complex64::new(2.0, 0.0);
            let lambda2 = (trace - sqrt_disc) / Complex64::new(2.0, 0.0);
            return vec![lambda1, lambda2];
        }
        // For Hermitian + real-valued matrices, use Jacobi (much more stable)
        if self.is_hermitian(tolerance) && self.is_real_valued(tolerance) {
            return self.eigenvalues_real_symmetric(max_iterations, tolerance);
        }
        // For larger matrices, use power iteration to find dominant eigenvalue
        // This is a simplified implementation
        let mut eigenvalues = Vec::with_capacity(n);
        let mut working_matrix = self.clone();
        for _ in 0..n.min(max_iterations) {
            // Power iteration to find largest eigenvalue
            let mut v = ComplexVector::new(vec![Complex64::new(1.0, 0.0); working_matrix.rows]);
            v.normalize();
            let mut eigenvalue = Complex64::new(0.0, 0.0);
            for _ in 0..max_iterations {
                let new_v = working_matrix.matvec(&v);
                // Rayleigh-quotient-style estimate ⟨v|Av⟩ (v is kept normalized).
                let new_eigenvalue = v.inner(&new_v);
                if (new_eigenvalue - eigenvalue).norm() < tolerance {
                    eigenvalue = new_eigenvalue;
                    break;
                }
                eigenvalue = new_eigenvalue;
                v = new_v.normalized();
            }
            eigenvalues.push(eigenvalue);
            // Deflate matrix (simplified rank-one update A - λ v v†).
            // NOTE(review): exact only when eigenvectors are orthogonal (normal
            // matrices); for general matrices this is an approximation.
            if working_matrix.rows > 1 {
                working_matrix = working_matrix.sub(&v.outer(&v).scale(eigenvalue));
            }
        }
        // Sort by magnitude (descending)
        eigenvalues.sort_by(|a, b| b.norm().partial_cmp(&a.norm()).unwrap_or(std::cmp::Ordering::Equal));
        eigenvalues
    }
}
/// `&a * &b` — matrix product; panics on inner-dimension mismatch
/// (delegates to `ComplexMatrix::matmul`).
impl Mul for &ComplexMatrix {
    type Output = ComplexMatrix;
    fn mul(self, other: &ComplexMatrix) -> ComplexMatrix {
        self.matmul(other)
    }
}

/// `&a + &b` — element-wise sum; panics on shape mismatch.
impl Add for &ComplexMatrix {
    type Output = ComplexMatrix;
    fn add(self, other: &ComplexMatrix) -> ComplexMatrix {
        ComplexMatrix::add(self, other)
    }
}

/// `&a - &b` — element-wise difference; panics on shape mismatch.
impl Sub for &ComplexMatrix {
    type Output = ComplexMatrix;
    fn sub(self, other: &ComplexMatrix) -> ComplexMatrix {
        ComplexMatrix::sub(self, other)
    }
}
/// Common quantum gates as matrices
pub mod gates {
use super::*;
/// Pauli X gate (NOT gate)
pub fn pauli_x() -> ComplexMatrix {
ComplexMatrix::new(
vec![
Complex64::new(0.0, 0.0),
Complex64::new(1.0, 0.0),
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
],
2,
2,
)
}
/// Pauli Y gate
pub fn pauli_y() -> ComplexMatrix {
ComplexMatrix::new(
vec![
Complex64::new(0.0, 0.0),
Complex64::new(0.0, -1.0),
Complex64::new(0.0, 1.0),
Complex64::new(0.0, 0.0),
],
2,
2,
)
}
/// Pauli Z gate
pub fn pauli_z() -> ComplexMatrix {
ComplexMatrix::new(
vec![
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(-1.0, 0.0),
],
2,
2,
)
}
/// Hadamard gate
pub fn hadamard() -> ComplexMatrix {
let s = 1.0 / 2.0_f64.sqrt();
ComplexMatrix::new(
vec![
Complex64::new(s, 0.0),
Complex64::new(s, 0.0),
Complex64::new(s, 0.0),
Complex64::new(-s, 0.0),
],
2,
2,
)
}
/// Phase gate S = sqrt(Z)
pub fn phase() -> ComplexMatrix {
ComplexMatrix::new(
vec![
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 1.0),
],
2,
2,
)
}
/// T gate (pi/8 gate)
pub fn t_gate() -> ComplexMatrix {
let phase = Complex64::from_polar(1.0, std::f64::consts::FRAC_PI_4);
ComplexMatrix::new(
vec![
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
phase,
],
2,
2,
)
}
/// CNOT gate (controlled-NOT)
pub fn cnot() -> ComplexMatrix {
ComplexMatrix::new(
vec![
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
],
4,
4,
)
}
/// SWAP gate
pub fn swap() -> ComplexMatrix {
ComplexMatrix::new(
vec![
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(1.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
Complex64::new(1.0, 0.0),
],
4,
4,
)
}
/// Rotation around X axis by angle theta
pub fn rx(theta: f64) -> ComplexMatrix {
let c = (theta / 2.0).cos();
let s = (theta / 2.0).sin();
ComplexMatrix::new(
vec![
Complex64::new(c, 0.0),
Complex64::new(0.0, -s),
Complex64::new(0.0, -s),
Complex64::new(c, 0.0),
],
2,
2,
)
}
/// Rotation around Y axis by angle theta
pub fn ry(theta: f64) -> ComplexMatrix {
let c = (theta / 2.0).cos();
let s = (theta / 2.0).sin();
ComplexMatrix::new(
vec![
Complex64::new(c, 0.0),
Complex64::new(-s, 0.0),
Complex64::new(s, 0.0),
Complex64::new(c, 0.0),
],
2,
2,
)
}
/// Rotation around Z axis by angle theta
pub fn rz(theta: f64) -> ComplexMatrix {
let phase_neg = Complex64::from_polar(1.0, -theta / 2.0);
let phase_pos = Complex64::from_polar(1.0, theta / 2.0);
ComplexMatrix::new(
vec![
phase_neg,
Complex64::new(0.0, 0.0),
Complex64::new(0.0, 0.0),
phase_pos,
],
2,
2,
)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Vector construction, dimension, and squared norm: |1|² + |i|² = 2.
    #[test]
    fn test_complex_vector_basics() {
        let v = ComplexVector::new(vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 1.0),
        ]);
        assert_eq!(v.dim(), 2);
        assert!((v.norm_squared() - 2.0).abs() < 1e-10);
    }
    #[test]
    fn test_inner_product() {
        let v1 = ComplexVector::new(vec![Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)]);
        let v2 = ComplexVector::new(vec![Complex64::new(0.0, 0.0), Complex64::new(1.0, 0.0)]);
        // Orthogonal vectors have zero inner product
        let inner = v1.inner(&v2);
        assert!((inner.norm()) < 1e-10);
        // Self inner product equals norm squared
        let self_inner = v1.inner(&v1);
        assert!((self_inner.re - 1.0).abs() < 1e-10);
    }
    // Tr(I_2) = 2 and the identity is square.
    #[test]
    fn test_matrix_operations() {
        let identity = ComplexMatrix::identity(2);
        assert!(identity.is_square());
        assert!((identity.trace().re - 2.0).abs() < 1e-10);
    }
    #[test]
    fn test_hadamard_unitarity() {
        let h = gates::hadamard();
        assert!(h.is_unitary(1e-10));
    }
    // Pauli matrices square to the identity and are Hermitian.
    #[test]
    fn test_pauli_matrices() {
        let x = gates::pauli_x();
        let y = gates::pauli_y();
        let z = gates::pauli_z();
        // X² = I
        let x2 = x.matmul(&x);
        assert!((x2.get(0, 0).re - 1.0).abs() < 1e-10);
        assert!((x2.get(1, 1).re - 1.0).abs() < 1e-10);
        // Y² = I
        let y2 = y.matmul(&y);
        assert!((y2.get(0, 0).re - 1.0).abs() < 1e-10);
        // Z² = I
        let z2 = z.matmul(&z);
        assert!((z2.get(0, 0).re - 1.0).abs() < 1e-10);
        // All are Hermitian
        assert!(x.is_hermitian(1e-10));
        assert!(y.is_hermitian(1e-10));
        assert!(z.is_hermitian(1e-10));
    }
    #[test]
    fn test_tensor_product() {
        let v1 = ComplexVector::new(vec![Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)]);
        let v2 = ComplexVector::new(vec![Complex64::new(0.0, 0.0), Complex64::new(1.0, 0.0)]);
        let tensor = v1.tensor(&v2);
        assert_eq!(tensor.dim(), 4);
        // |0⟩ ⊗ |1⟩ = |01⟩ = [0, 1, 0, 0]
        assert!((tensor.data[1].re - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_partial_trace() {
        // Create a 4x4 matrix (2-qubit system)
        let mut m = ComplexMatrix::zeros(4, 4);
        // Set it to |00⟩⟨00| + |11⟩⟨11| (maximally entangled diagonal)
        m.set(0, 0, Complex64::new(0.5, 0.0));
        m.set(3, 3, Complex64::new(0.5, 0.0));
        // Partial trace over B should give maximally mixed state on A
        let reduced = m.partial_trace_b(2, 2);
        assert_eq!(reduced.rows, 2);
        assert!((reduced.get(0, 0).re - 0.5).abs() < 1e-10);
        assert!((reduced.get(1, 1).re - 0.5).abs() < 1e-10);
    }
    #[test]
    fn test_eigenvalues_2x2() {
        // Identity matrix has eigenvalues 1, 1
        let identity = ComplexMatrix::identity(2);
        let eigenvalues = identity.eigenvalues(100, 1e-10);
        assert_eq!(eigenvalues.len(), 2);
        for ev in &eigenvalues {
            assert!((ev.re - 1.0).abs() < 1e-5);
        }
        // Pauli Z has eigenvalues +1, -1
        // NOTE(review): only the eigenvalue count is asserted here; the ±1
        // values themselves are not checked.
        let z = gates::pauli_z();
        let z_eigenvalues = z.eigenvalues(100, 1e-10);
        assert_eq!(z_eigenvalues.len(), 2);
    }
}

View File

@@ -0,0 +1,645 @@
//! Density Matrix Representation
//!
//! Mixed quantum states represented as density matrices (positive semidefinite,
//! trace-one Hermitian operators).
use super::complex_matrix::{Complex64, ComplexMatrix, ComplexVector};
use super::quantum_state::QuantumState;
use super::{constants, QuantumTopologyError, Result};
/// Mixed quantum state representation
///
/// Stores a classical ensemble {(pᵢ, |ψᵢ⟩)}; `to_density_matrix` converts it
/// to the operator form ρ = Σᵢ pᵢ |ψᵢ⟩⟨ψᵢ|.
#[derive(Debug, Clone)]
pub struct MixedState {
    /// Ensemble of pure states with probabilities
    pub states: Vec<(f64, QuantumState)>,
}
impl MixedState {
    /// Create a mixed state from an ensemble of (probability, state) pairs.
    ///
    /// # Errors
    /// Returns `InvalidDensityMatrix` if any weight is negative (previously
    /// unchecked: negative weights could still sum to 1 and be accepted) or
    /// if the weights do not sum to 1 within `constants::EPSILON`.
    pub fn new(states: Vec<(f64, QuantumState)>) -> Result<Self> {
        // A probability distribution needs non-negative entries, not just a
        // unit sum.
        for (p, _) in &states {
            if *p < -constants::EPSILON {
                return Err(QuantumTopologyError::InvalidDensityMatrix(
                    format!("Negative probability {} in ensemble", p),
                ));
            }
        }
        let total_prob: f64 = states.iter().map(|(p, _)| p).sum();
        if (total_prob - 1.0).abs() > constants::EPSILON {
            return Err(QuantumTopologyError::InvalidDensityMatrix(
                format!("Probabilities sum to {} instead of 1", total_prob),
            ));
        }
        Ok(Self { states })
    }
    /// Create a pure state (single state with probability 1)
    pub fn pure(state: QuantumState) -> Self {
        Self {
            states: vec![(1.0, state)],
        }
    }
    /// Create maximally mixed state (I/d): each basis state with weight 1/d.
    pub fn maximally_mixed(dimension: usize) -> Self {
        let prob = 1.0 / dimension as f64;
        let states: Vec<(f64, QuantumState)> = (0..dimension)
            .map(|i| (prob, QuantumState::basis_state(dimension, i).unwrap()))
            .collect();
        Self { states }
    }
    /// Convert to density matrix: ρ = Σᵢ pᵢ |ψᵢ⟩⟨ψᵢ|.
    ///
    /// An empty ensemble yields the 1x1 zero matrix.
    pub fn to_density_matrix(&self) -> DensityMatrix {
        if self.states.is_empty() {
            return DensityMatrix::zeros(1);
        }
        let dim = self.states[0].1.dimension;
        let mut matrix = ComplexMatrix::zeros(dim, dim);
        for (prob, state) in &self.states {
            let outer = state.to_vector().outer(&state.to_vector());
            matrix = matrix.add(&outer.scale(Complex64::new(*prob, 0.0)));
        }
        DensityMatrix { matrix }
    }
    /// Check if this is a pure state (a single entry with weight ≈ 1)
    pub fn is_pure(&self) -> bool {
        self.states.len() == 1 && (self.states[0].0 - 1.0).abs() < constants::EPSILON
    }
}
/// Density matrix representation of a quantum state
///
/// `new` enforces squareness, Hermiticity, and unit trace; `new_unchecked`
/// skips validation for internal construction.
#[derive(Debug, Clone)]
pub struct DensityMatrix {
    /// The density matrix ρ
    pub matrix: ComplexMatrix,
}
impl DensityMatrix {
    /// Create a new density matrix, validating it's a valid quantum state
    ///
    /// # Errors
    /// Returns `InvalidDensityMatrix` when the matrix is not square, not
    /// Hermitian, or its trace differs from 1 (within `constants::EPSILON`).
    ///
    /// NOTE(review): positive semidefiniteness is NOT verified here, so a
    /// Hermitian trace-one matrix with negative eigenvalues is accepted.
    pub fn new(matrix: ComplexMatrix) -> Result<Self> {
        // Check square
        if !matrix.is_square() {
            return Err(QuantumTopologyError::InvalidDensityMatrix(
                "Matrix must be square".to_string(),
            ));
        }
        // Check Hermitian
        if !matrix.is_hermitian(constants::EPSILON) {
            return Err(QuantumTopologyError::InvalidDensityMatrix(
                "Matrix must be Hermitian".to_string(),
            ));
        }
        // Check trace = 1 (both the real deviation and any imaginary part)
        let trace = matrix.trace();
        if (trace.re - 1.0).abs() > constants::EPSILON || trace.im.abs() > constants::EPSILON {
            return Err(QuantumTopologyError::InvalidDensityMatrix(
                format!("Trace must be 1, got {}", trace),
            ));
        }
        Ok(Self { matrix })
    }
    /// Create without validation (for internal use)
    pub fn new_unchecked(matrix: ComplexMatrix) -> Self {
        Self { matrix }
    }
    /// Create a zero density matrix
    ///
    /// NOTE(review): the trace is 0, so this is not a valid state on its own;
    /// it is used as an accumulator (see `MixedState::to_density_matrix`).
    pub fn zeros(dimension: usize) -> Self {
        Self {
            matrix: ComplexMatrix::zeros(dimension, dimension),
        }
    }
    /// Create a pure state density matrix |ψ⟩⟨ψ|
    pub fn from_pure_state(state: &QuantumState) -> Self {
        Self {
            matrix: state.to_density_matrix(),
        }
    }
    /// Create a pure state density matrix from a vector (outer product v v†)
    pub fn from_vector(v: &ComplexVector) -> Self {
        Self {
            matrix: v.outer(v),
        }
    }
    /// Create the maximally mixed state I/d
    pub fn maximally_mixed(dimension: usize) -> Self {
        let mut matrix = ComplexMatrix::identity(dimension);
        let scale = Complex64::new(1.0 / dimension as f64, 0.0);
        matrix = matrix.scale(scale);
        Self { matrix }
    }
    /// Create a thermal (Gibbs) state ρ = exp(-βH) / Z
    ///
    /// NOTE(review): the state is assembled as a diagonal matrix of Boltzmann
    /// weights in the computational basis, pairing eigenvalue i with basis
    /// index i. That is exact only when `hamiltonian` is already diagonal;
    /// for a general H the eigenbasis is ignored — confirm callers only pass
    /// (near-)diagonal Hamiltonians.
    pub fn thermal_state(hamiltonian: &ComplexMatrix, beta: f64) -> Result<Self> {
        if !hamiltonian.is_square() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: hamiltonian.rows,
                got: hamiltonian.cols,
            });
        }
        // For diagonal Hamiltonians, this is straightforward
        // For general case, we need matrix exponential
        let dim = hamiltonian.rows;
        let eigenvalues = hamiltonian.eigenvalues(100, 1e-10);
        // Compute partition function Z = Σ exp(-β Eᵢ)
        let partition: f64 = eigenvalues.iter().map(|ev| (-beta * ev.re).exp()).sum();
        // Create thermal state (diagonal approximation)
        let mut matrix = ComplexMatrix::zeros(dim, dim);
        for (i, ev) in eigenvalues.iter().enumerate().take(dim) {
            let prob = (-beta * ev.re).exp() / partition;
            matrix.set(i, i, Complex64::new(prob, 0.0));
        }
        Ok(Self { matrix })
    }
    /// Dimension of the Hilbert space
    pub fn dimension(&self) -> usize {
        self.matrix.rows
    }
    /// Compute the trace
    pub fn trace(&self) -> Complex64 {
        self.matrix.trace()
    }
    /// Compute the purity Tr(ρ²)
    ///
    /// Costs a full matrix multiply (O(n³)); `purity_fast` offers an O(n²)
    /// Frobenius-norm equivalent for Hermitian ρ.
    pub fn purity(&self) -> f64 {
        self.matrix.matmul(&self.matrix).trace().re
    }
    /// Check if this is a pure state (purity ≈ 1)
    pub fn is_pure(&self, tolerance: f64) -> bool {
        (self.purity() - 1.0).abs() < tolerance
    }
    /// Compute the von Neumann entropy S(ρ) = -Tr(ρ log ρ)
    ///
    /// Uses the natural log; eigenvalues below EPSILON are skipped, i.e.
    /// 0·ln 0 is treated as 0.
    pub fn von_neumann_entropy(&self) -> f64 {
        let eigenvalues = self.matrix.eigenvalues(100, 1e-10);
        let mut entropy = 0.0;
        for ev in eigenvalues {
            let lambda = ev.re.max(0.0); // Eigenvalues should be non-negative
            if lambda > constants::EPSILON {
                entropy -= lambda * lambda.ln();
            }
        }
        entropy
    }
    /// Compute the linear entropy S_L(ρ) = 1 - Tr(ρ²)
    pub fn linear_entropy(&self) -> f64 {
        1.0 - self.purity()
    }
    /// Expectation value ⟨A⟩ = Tr(ρA)
    ///
    /// # Errors
    /// Returns `DimensionMismatch` unless `observable` is square with the
    /// same dimension as ρ.
    pub fn expectation(&self, observable: &ComplexMatrix) -> Result<Complex64> {
        if observable.rows != self.dimension() || observable.cols != self.dimension() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.dimension(),
                got: observable.rows,
            });
        }
        Ok(self.matrix.matmul(observable).trace())
    }
/// Apply a unitary transformation: ρ → U ρ U†
pub fn apply_unitary(&self, unitary: &ComplexMatrix) -> Result<Self> {
if unitary.rows != self.dimension() {
return Err(QuantumTopologyError::DimensionMismatch {
expected: self.dimension(),
got: unitary.rows,
});
}
let u_rho = unitary.matmul(&self.matrix);
let result = u_rho.matmul(&unitary.adjoint());
Ok(Self { matrix: result })
}
    /// Partial trace over subsystem B (ρ_AB → ρ_A)
    ///
    /// # Errors
    /// Returns `DimensionMismatch` unless dim_a * dim_b equals the total
    /// dimension.
    pub fn partial_trace_b(&self, dim_a: usize, dim_b: usize) -> Result<Self> {
        if dim_a * dim_b != self.dimension() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: dim_a * dim_b,
                got: self.dimension(),
            });
        }
        Ok(Self {
            matrix: self.matrix.partial_trace_b(dim_a, dim_b),
        })
    }
    /// Partial trace over subsystem A (ρ_AB → ρ_B)
    ///
    /// # Errors
    /// Returns `DimensionMismatch` unless dim_a * dim_b equals the total
    /// dimension.
    pub fn partial_trace_a(&self, dim_a: usize, dim_b: usize) -> Result<Self> {
        if dim_a * dim_b != self.dimension() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: dim_a * dim_b,
                got: self.dimension(),
            });
        }
        Ok(Self {
            matrix: self.matrix.partial_trace_a(dim_a, dim_b),
        })
    }
/// Compute quantum fidelity F(ρ, σ) = (Tr√(√ρ σ √ρ))²
/// For classical simulation, we use a simplified formula
pub fn fidelity(&self, other: &DensityMatrix) -> Result<f64> {
if self.dimension() != other.dimension() {
return Err(QuantumTopologyError::DimensionMismatch {
expected: self.dimension(),
got: other.dimension(),
});
}
// For pure states: F(|ψ⟩⟨ψ|, |φ⟩⟨φ|) = |⟨ψ|φ⟩|²
// For general states, use F = Tr(ρσ) + 2√(det(ρ)det(σ)) for 2x2
// For larger dimensions, approximate with Tr(ρσ)
let dim = self.dimension();
if dim == 2 {
// Closed form for 2x2
let trace_product = self.matrix.matmul(&other.matrix).trace().re;
let det_self = self.determinant_2x2();
let det_other = other.determinant_2x2();
let fidelity = trace_product + 2.0 * (det_self * det_other).sqrt();
Ok(fidelity.max(0.0).min(1.0))
} else {
// Approximate with Tr(ρσ) for larger dimensions
// This is the Hilbert-Schmidt fidelity
let trace_product = self.matrix.matmul(&other.matrix).trace().re;
Ok(trace_product.max(0.0).min(1.0))
}
}
/// Compute 2x2 determinant
fn determinant_2x2(&self) -> f64 {
if self.dimension() != 2 {
return 0.0;
}
let a = self.matrix.get(0, 0);
let b = self.matrix.get(0, 1);
let c = self.matrix.get(1, 0);
let d = self.matrix.get(1, 1);
(a * d - b * c).re
}
    /// Compute trace distance D(ρ, σ) = (1/2)||ρ - σ||₁
    ///
    /// The trace norm of the Hermitian difference equals the sum of the
    /// absolute values of its eigenvalues.
    pub fn trace_distance(&self, other: &DensityMatrix) -> Result<f64> {
        if self.dimension() != other.dimension() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.dimension(),
                got: other.dimension(),
            });
        }
        let diff = self.matrix.sub(&other.matrix);
        // Compute eigenvalues of difference
        let eigenvalues = diff.eigenvalues(100, 1e-10);
        // Trace norm is sum of absolute values of eigenvalues
        let trace_norm: f64 = eigenvalues.iter().map(|ev| ev.norm()).sum();
        Ok(trace_norm / 2.0)
    }
    /// Compute relative entropy S(ρ||σ) = Tr(ρ(log ρ - log σ))
    /// Only defined when supp(ρ) ⊆ supp(σ)
    ///
    /// NOTE(review): only the diagonal entries are used, so this is exact for
    /// commuting/diagonal (classical) states; off-diagonal coherences are
    /// ignored — confirm callers expect the classical quantity.
    pub fn relative_entropy(&self, other: &DensityMatrix) -> Result<f64> {
        if self.dimension() != other.dimension() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.dimension(),
                got: other.dimension(),
            });
        }
        // For diagonal matrices (classical distributions)
        // D(ρ||σ) = Σᵢ ρᵢᵢ (log ρᵢᵢ - log σᵢᵢ)
        let mut rel_entropy = 0.0;
        for i in 0..self.dimension() {
            let rho_ii = self.matrix.get(i, i).re;
            let sigma_ii = other.matrix.get(i, i).re;
            if rho_ii > constants::EPSILON {
                if sigma_ii < constants::EPSILON {
                    // Infinite relative entropy: ρ has support where σ does not
                    return Ok(f64::INFINITY);
                }
                rel_entropy += rho_ii * (rho_ii.ln() - sigma_ii.ln());
            }
        }
        Ok(rel_entropy)
    }
    /// Tensor product ρ ⊗ σ of two density matrices (dimensions multiply)
    pub fn tensor(&self, other: &DensityMatrix) -> Self {
        Self {
            matrix: self.matrix.tensor(&other.matrix),
        }
    }
/// Compute the Bloch vector for a single qubit (2x2 density matrix)
pub fn bloch_vector(&self) -> Result<[f64; 3]> {
if self.dimension() != 2 {
return Err(QuantumTopologyError::InvalidDensityMatrix(
"Bloch vector only defined for qubits".to_string(),
));
}
// ρ = (I + r·σ)/2
// r_x = 2 Re(ρ₀₁), r_y = 2 Im(ρ₀₁), r_z = ρ₀₀ - ρ₁₁
let rho_01 = self.matrix.get(0, 1);
let rx = 2.0 * rho_01.re;
let ry = 2.0 * rho_01.im;
let rz = self.matrix.get(0, 0).re - self.matrix.get(1, 1).re;
Ok([rx, ry, rz])
}
    /// Create a qubit density matrix from Bloch vector
    ///
    /// # Errors
    /// Returns `InvalidDensityMatrix` if |r| > 1 (outside the Bloch ball).
    pub fn from_bloch_vector(r: [f64; 3]) -> Result<Self> {
        let [rx, ry, rz] = r;
        // Check |r| ≤ 1
        let norm = (rx * rx + ry * ry + rz * rz).sqrt();
        if norm > 1.0 + constants::EPSILON {
            return Err(QuantumTopologyError::InvalidDensityMatrix(
                "Bloch vector magnitude must be ≤ 1".to_string(),
            ));
        }
        // ρ = (I + r·σ)/2 = [[1+rz, rx-iry], [rx+iry, 1-rz]]/2
        let matrix = ComplexMatrix::new(
            vec![
                Complex64::new((1.0 + rz) / 2.0, 0.0),
                Complex64::new(rx / 2.0, -ry / 2.0),
                Complex64::new(rx / 2.0, ry / 2.0),
                Complex64::new((1.0 - rz) / 2.0, 0.0),
            ],
            2,
            2,
        );
        Ok(Self { matrix })
    }
    /// Add two density matrices (for mixtures)
    /// Note: result may not be normalized
    ///
    /// # Errors
    /// Returns `DimensionMismatch` if the dimensions differ.
    pub fn add(&self, other: &DensityMatrix) -> Result<Self> {
        if self.dimension() != other.dimension() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.dimension(),
                got: other.dimension(),
            });
        }
        Ok(Self {
            matrix: self.matrix.add(&other.matrix),
        })
    }
    /// Scale the density matrix by a real factor (trace may no longer be 1)
    pub fn scale(&self, factor: f64) -> Self {
        Self {
            matrix: self.matrix.scale(Complex64::new(factor, 0.0)),
        }
    }
    /// Compute the spectral decomposition once, caching eigenvalues for reuse.
    ///
    /// This is the recommended way to compute multiple spectral quantities
    /// (entropy, purity, effective rank) from a single eigenvalue computation.
    /// Each call to `von_neumann_entropy()` or `purity()` separately costs O(n³);
    /// using this method computes everything in a single O(n³) pass.
    pub fn spectral_decomposition(&self) -> SpectralDecomposition {
        let eigenvalues: Vec<f64> = self
            .matrix
            .eigenvalues(constants::MAX_ITERATIONS, constants::DEFAULT_TOLERANCE)
            .into_iter()
            .map(|ev| ev.re.max(0.0)) // Clamp to non-negative (numerical safety)
            .collect();
        // Entropy: S = -Σ λ_i ln(λ_i), skipping numerically-zero eigenvalues
        let entropy = eigenvalues
            .iter()
            .filter(|&&lambda| lambda > constants::EPSILON)
            .map(|&lambda| -lambda * lambda.ln())
            .sum();
        // Purity: Tr(ρ²) = Σ λ_i²
        let purity = eigenvalues.iter().map(|&lambda| lambda * lambda).sum();
        // Effective rank: count of eigenvalues above EPSILON
        let effective_rank = eigenvalues
            .iter()
            .filter(|&&lambda| lambda > constants::EPSILON)
            .count();
        SpectralDecomposition {
            eigenvalues,
            entropy,
            purity,
            effective_rank,
        }
    }
    /// Compute purity using Frobenius norm: Tr(ρ²) = ||ρ||²_F for Hermitian ρ.
    ///
    /// This is O(n²) compared to the O(n³) matmul-based approach.
    pub fn purity_fast(&self) -> f64 {
        // Sum of |ρ_ij|² over all entries
        self.matrix.data.iter().map(|c| c.norm_sqr()).sum()
    }
/// Compute von Neumann entropy from pre-computed eigenvalues.
pub fn entropy_from_eigenvalues(eigenvalues: &[f64]) -> f64 {
eigenvalues
.iter()
.filter(|&&lambda| lambda > constants::EPSILON)
.map(|&lambda| -lambda * lambda.ln())
.sum()
}
/// Compute purity from pre-computed eigenvalues: Tr(ρ²) = Σ λ_i².
pub fn purity_from_eigenvalues(eigenvalues: &[f64]) -> f64 {
eigenvalues.iter().map(|&lambda| lambda * lambda).sum()
}
}
/// Pre-computed spectral decomposition of a density matrix.
///
/// Computing eigenvalues is O(n³). This struct caches the result so that
/// entropy, purity, trace distance, and other spectral quantities can be
/// derived in O(n) from the same decomposition.
#[derive(Debug, Clone)]
pub struct SpectralDecomposition {
    /// Real eigenvalues (density matrices are Hermitian → real eigenvalues);
    /// `spectral_decomposition` clamps these to be non-negative
    pub eigenvalues: Vec<f64>,
    /// Von Neumann entropy: S(ρ) = -Σ λ_i ln(λ_i)
    pub entropy: f64,
    /// Purity: Tr(ρ²) = Σ λ_i²
    pub purity: f64,
    /// Effective rank (number of eigenvalues above EPSILON)
    pub effective_rank: usize,
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_pure_state_density_matrix() {
        let state = QuantumState::ground_state(1);
        let rho = DensityMatrix::from_pure_state(&state);
        // Pure state has purity 1
        assert!((rho.purity() - 1.0).abs() < 1e-10);
        // Pure state has zero entropy
        assert!(rho.von_neumann_entropy().abs() < 1e-10);
    }
    #[test]
    fn test_maximally_mixed_state() {
        let rho = DensityMatrix::maximally_mixed(2);
        // Trace = 1
        assert!((rho.trace().re - 1.0).abs() < 1e-10);
        // Purity = 1/d for maximally mixed
        assert!((rho.purity() - 0.5).abs() < 1e-10);
        // Entropy = log(d) for maximally mixed
        let expected_entropy = 2.0_f64.ln();
        assert!((rho.von_neumann_entropy() - expected_entropy).abs() < 1e-5);
    }
    #[test]
    fn test_fidelity() {
        let rho = DensityMatrix::from_pure_state(&QuantumState::ground_state(1));
        let sigma = DensityMatrix::maximally_mixed(2);
        // Fidelity with itself is 1
        let self_fidelity = rho.fidelity(&rho).unwrap();
        assert!((self_fidelity - 1.0).abs() < 1e-10);
        // Fidelity between |0⟩ and maximally mixed
        let fid = rho.fidelity(&sigma).unwrap();
        assert!(fid > 0.0 && fid < 1.0);
    }
    #[test]
    fn test_trace_distance() {
        let rho = DensityMatrix::from_pure_state(&QuantumState::basis_state(2, 0).unwrap());
        let sigma = DensityMatrix::from_pure_state(&QuantumState::basis_state(2, 1).unwrap());
        // Orthogonal pure states have trace distance 1
        let dist = rho.trace_distance(&sigma).unwrap();
        assert!((dist - 1.0).abs() < 1e-5);
        // Same state has trace distance 0
        let self_dist = rho.trace_distance(&rho).unwrap();
        assert!(self_dist < 1e-10);
    }
    // NOTE(review): only z-axis states are exercised here, so the sign of the
    // y component of the Bloch vector is not covered by this test.
    #[test]
    fn test_bloch_vector() {
        // |0⟩ state has Bloch vector (0, 0, 1)
        let rho_0 = DensityMatrix::from_pure_state(&QuantumState::basis_state(2, 0).unwrap());
        let bloch = rho_0.bloch_vector().unwrap();
        assert!((bloch[2] - 1.0).abs() < 1e-10);
        // |1⟩ state has Bloch vector (0, 0, -1)
        let rho_1 = DensityMatrix::from_pure_state(&QuantumState::basis_state(2, 1).unwrap());
        let bloch = rho_1.bloch_vector().unwrap();
        assert!((bloch[2] + 1.0).abs() < 1e-10);
        // Maximally mixed has Bloch vector (0, 0, 0)
        let rho_mm = DensityMatrix::maximally_mixed(2);
        let bloch = rho_mm.bloch_vector().unwrap();
        assert!(bloch.iter().all(|x| x.abs() < 1e-10));
    }
    #[test]
    fn test_partial_trace() {
        // Create a product state |00⟩
        let state_00 = QuantumState::basis_state(4, 0).unwrap();
        let rho_ab = DensityMatrix::from_pure_state(&state_00);
        // Partial trace over B should give |0⟩⟨0|
        let rho_a = rho_ab.partial_trace_b(2, 2).unwrap();
        assert_eq!(rho_a.dimension(), 2);
        assert!((rho_a.purity() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_tensor_product() {
        let rho_0 = DensityMatrix::from_pure_state(&QuantumState::basis_state(2, 0).unwrap());
        let rho_1 = DensityMatrix::from_pure_state(&QuantumState::basis_state(2, 1).unwrap());
        let rho_01 = rho_0.tensor(&rho_1);
        assert_eq!(rho_01.dimension(), 4);
        // Should be |01⟩⟨01|
        assert!((rho_01.matrix.get(1, 1).re - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_spectral_decomposition() {
        // Pure state: eigenvalues should be [1, 0], entropy 0, purity 1
        let rho = DensityMatrix::from_pure_state(&QuantumState::ground_state(1));
        let spectral = rho.spectral_decomposition();
        assert!((spectral.purity - 1.0).abs() < 1e-5);
        assert!(spectral.entropy.abs() < 1e-5);
        assert_eq!(spectral.effective_rank, 1);
    }
    #[test]
    fn test_spectral_decomposition_mixed() {
        // Maximally mixed single qubit (d = 2): eigenvalues [0.5, 0.5],
        // entropy = ln(2), purity = 0.5
        let rho = DensityMatrix::maximally_mixed(2);
        let spectral = rho.spectral_decomposition();
        assert!((spectral.purity - 0.5).abs() < 1e-5);
        assert!((spectral.entropy - 2.0_f64.ln()).abs() < 1e-3);
        assert_eq!(spectral.effective_rank, 2);
    }
    // purity_fast must agree with the matmul-based purity on a pure state...
    #[test]
    fn test_purity_fast() {
        let rho = DensityMatrix::from_pure_state(&QuantumState::ground_state(1));
        let purity_fast = rho.purity_fast();
        let purity_orig = rho.purity();
        assert!((purity_fast - purity_orig).abs() < 1e-10);
    }
    // ...and on a mixed state.
    #[test]
    fn test_purity_fast_mixed() {
        let rho = DensityMatrix::maximally_mixed(2);
        let purity_fast = rho.purity_fast();
        let purity_orig = rho.purity();
        assert!((purity_fast - purity_orig).abs() < 1e-10);
    }
}

View File

@@ -0,0 +1,146 @@
//! # Quantum/Algebraic Topology Module
//!
//! This module provides quantum computing primitives and algebraic topology tools
//! for the Prime-Radiant coherence engine. It enables:
//!
//! - **Quantum State Simulation**: Pure states, density matrices, and quantum channels
//! - **Topological Invariants**: Betti numbers, Euler characteristic, homology groups
//! - **Persistent Homology**: Track topological features across filtration scales
//! - **Topological Quantum Encoding**: Structure-preserving quantum encodings
//! - **Coherence Integration**: Quantum and topological measures of structural coherence
//!
//! ## Mathematical Foundation
//!
//! The module bridges quantum mechanics and algebraic topology to provide
//! structure-preserving coherence measures:
//!
//! - **Quantum Fidelity**: F(ρ, σ) = (Tr√(√ρ σ √ρ))² measures state similarity
//! - **Topological Energy**: Uses Betti numbers and persistence to quantify structure
//! - **Sheaf Cohomology**: Connects topological invariants to coherence residuals
//!
//! ## Design Philosophy
//!
//! This is a **classical simulation** of quantum concepts, designed for:
//! 1. Numerical stability (using `num-complex` for complex arithmetic)
//! 2. No external quantum hardware requirements
//! 3. Integration with Prime-Radiant's sheaf-theoretic framework
//! 4. WASM compatibility (pure Rust, no system dependencies)
#![allow(dead_code)]
pub mod complex_matrix;
pub mod quantum_state;
pub mod density_matrix;
pub mod quantum_channel;
pub mod topological_invariant;
pub mod persistent_homology;
pub mod simplicial_complex;
pub mod topological_code;
pub mod coherence_integration;
// Re-exports for convenient access
pub use complex_matrix::{ComplexMatrix, ComplexVector};
pub use quantum_state::{QuantumState, QuantumBasis, Qubit};
pub use density_matrix::{DensityMatrix, MixedState};
pub use quantum_channel::{QuantumChannel, KrausOperator, PauliOperator, PauliType};
pub use topological_invariant::{
TopologicalInvariant, HomologyGroup, CohomologyGroup, Cocycle,
};
pub use persistent_homology::{
PersistenceDiagram, BirthDeathPair, PersistentHomologyComputer,
};
pub use simplicial_complex::{
Simplex, SimplicialComplex, SparseMatrix, BoundaryMatrix,
};
pub use topological_code::{
TopologicalCode, StabilizerCode, GraphState, StructurePreservingEncoder,
SolverBackedOperator,
};
pub use coherence_integration::{
TopologicalEnergy, TopologicalCoherenceAnalyzer, QuantumCoherenceMetric,
};
/// Error type for quantum/topology operations
///
/// All fallible operations in this module return these via the module-level
/// `Result<T>` alias.
#[derive(Debug, Clone, PartialEq)]
pub enum QuantumTopologyError {
    /// Dimension mismatch between operands
    DimensionMismatch { expected: usize, got: usize },
    /// Invalid quantum state (not normalized)
    InvalidQuantumState(String),
    /// Invalid density matrix (not positive semidefinite or trace != 1)
    InvalidDensityMatrix(String),
    /// Invalid quantum channel (Kraus operators don't sum to identity)
    InvalidQuantumChannel(String),
    /// Singular matrix encountered
    SingularMatrix,
    /// Invalid simplex specification
    InvalidSimplex(String),
    /// Invalid topological code
    InvalidTopologicalCode(String),
    /// Computation failed to converge
    ConvergenceFailure { iterations: usize, tolerance: f64 },
    /// General numerical error
    NumericalError(String),
}
impl std::fmt::Display for QuantumTopologyError {
    // One human-readable line per variant; the message wording is relied on
    // by the tests in this module (e.g. "expected 4" / "got 8").
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::DimensionMismatch { expected, got } => {
                write!(f, "Dimension mismatch: expected {}, got {}", expected, got)
            }
            Self::InvalidQuantumState(msg) => write!(f, "Invalid quantum state: {}", msg),
            Self::InvalidDensityMatrix(msg) => write!(f, "Invalid density matrix: {}", msg),
            Self::InvalidQuantumChannel(msg) => write!(f, "Invalid quantum channel: {}", msg),
            Self::SingularMatrix => write!(f, "Singular matrix encountered"),
            Self::InvalidSimplex(msg) => write!(f, "Invalid simplex: {}", msg),
            Self::InvalidTopologicalCode(msg) => write!(f, "Invalid topological code: {}", msg),
            Self::ConvergenceFailure { iterations, tolerance } => {
                write!(f, "Failed to converge after {} iterations (tol={})", iterations, tolerance)
            }
            Self::NumericalError(msg) => write!(f, "Numerical error: {}", msg),
        }
    }
}
impl std::error::Error for QuantumTopologyError {}
/// Result type for quantum/topology operations
pub type Result<T> = std::result::Result<T, QuantumTopologyError>;
/// Constants used throughout the module
pub mod constants {
    /// Numerical tolerance for floating point comparisons
    pub const EPSILON: f64 = 1e-10;
    /// Maximum iterations for iterative algorithms
    pub const MAX_ITERATIONS: usize = 1000;
    /// Default convergence tolerance
    pub const DEFAULT_TOLERANCE: f64 = 1e-8;
    /// Pi constant (mirrors std for convenient qualified access)
    pub const PI: f64 = std::f64::consts::PI;
    /// Euler's number (mirrors std for convenient qualified access)
    pub const E: f64 = std::f64::consts::E;
}
#[cfg(test)]
mod tests {
    use super::*;
    // Error messages surface both the expected and actual dimension.
    #[test]
    fn test_error_display() {
        let err = QuantumTopologyError::DimensionMismatch { expected: 4, got: 8 };
        assert!(err.to_string().contains("expected 4"));
        assert!(err.to_string().contains("got 8"));
    }
    // Sanity-check the module constants against std.
    #[test]
    fn test_constants() {
        assert!(constants::EPSILON > 0.0);
        assert!(constants::MAX_ITERATIONS > 0);
        assert!((constants::PI - std::f64::consts::PI).abs() < 1e-15);
    }
}

View File

@@ -0,0 +1,730 @@
//! Persistent Homology
//!
//! Computes persistent homology using the standard algorithm, tracking birth-death
//! pairs of topological features across filtration scales.
use super::simplicial_complex::{Simplex, SimplicialComplex, SparseMatrix};
use super::{constants, QuantumTopologyError, Result};
use std::collections::{HashMap, HashSet, BTreeMap};
/// Birth-death pair representing a persistent feature
///
/// A `death` of `None` marks an essential feature that survives the whole
/// filtration.
#[derive(Debug, Clone, PartialEq)]
pub struct BirthDeathPair {
    /// Dimension of the feature (0 = component, 1 = loop, 2 = void, ...)
    pub dimension: usize,
    /// Birth time (filtration value when feature appears)
    pub birth: f64,
    /// Death time (None = essential feature that never dies)
    pub death: Option<f64>,
    /// Representative cycle (simplex that created the feature)
    pub birth_simplex: Option<Simplex>,
    /// Killing simplex (simplex that killed the feature)
    pub death_simplex: Option<Simplex>,
}
impl BirthDeathPair {
    /// Create a feature with the finite lifetime [birth, death).
    pub fn finite(
        dimension: usize,
        birth: f64,
        death: f64,
        birth_simplex: Option<Simplex>,
        death_simplex: Option<Simplex>,
    ) -> Self {
        Self {
            dimension,
            birth,
            death: Some(death),
            birth_simplex,
            death_simplex,
        }
    }
    /// Create an essential (infinite-lifetime) feature.
    pub fn essential(dimension: usize, birth: f64, birth_simplex: Option<Simplex>) -> Self {
        Self {
            dimension,
            birth,
            death: None,
            birth_simplex,
            death_simplex: None,
        }
    }
    /// Persistence (lifetime) of the feature; infinite when essential.
    pub fn persistence(&self) -> f64 {
        self.death.map_or(f64::INFINITY, |d| d - self.birth)
    }
    /// An essential feature is one that never dies.
    pub fn is_essential(&self) -> bool {
        self.death.is_none()
    }
    /// Midpoint of the (birth, death) interval; infinite when essential.
    pub fn midpoint(&self) -> f64 {
        self.death.map_or(f64::INFINITY, |d| (self.birth + d) / 2.0)
    }
    /// A feature is alive at `t` once born (inclusive) and strictly before
    /// its death; essential features stay alive forever.
    pub fn is_alive_at(&self, t: f64) -> bool {
        self.birth <= t
            && match self.death {
                Some(d) => d > t,
                None => true,
            }
    }
}
/// Persistence diagram: collection of birth-death pairs
#[derive(Debug, Clone)]
pub struct PersistenceDiagram {
    /// Birth-death pairs
    pub pairs: Vec<BirthDeathPair>,
    /// Maximum dimension computed (maintained incrementally by `add`)
    pub max_dimension: usize,
}
impl PersistenceDiagram {
    /// Create an empty diagram
    pub fn new() -> Self {
        Self {
            pairs: Vec::new(),
            max_dimension: 0,
        }
    }
    /// Add a birth-death pair, tracking the maximum dimension seen so far
    pub fn add(&mut self, pair: BirthDeathPair) {
        self.max_dimension = self.max_dimension.max(pair.dimension);
        self.pairs.push(pair);
    }
    /// Get pairs of dimension k
    pub fn pairs_of_dim(&self, k: usize) -> impl Iterator<Item = &BirthDeathPair> {
        self.pairs.iter().filter(move |p| p.dimension == k)
    }
    /// Get Betti numbers at filtration value t
    ///
    /// betti[k] counts the dimension-k features alive at t.
    pub fn betti_at(&self, t: f64) -> Vec<usize> {
        let mut betti = vec![0; self.max_dimension + 1];
        for pair in &self.pairs {
            if pair.is_alive_at(t) && pair.dimension <= self.max_dimension {
                betti[pair.dimension] += 1;
            }
        }
        betti
    }
    /// Total persistence (sum of all finite lifetimes)
    pub fn total_persistence(&self) -> f64 {
        self.pairs
            .iter()
            .filter(|p| !p.is_essential())
            .map(|p| p.persistence())
            .sum()
    }
    /// Total persistence in dimension k (finite features only)
    pub fn total_persistence_dim(&self, k: usize) -> f64 {
        self.pairs
            .iter()
            .filter(|p| p.dimension == k && !p.is_essential())
            .map(|p| p.persistence())
            .sum()
    }
    /// Average persistence over finite features (0.0 when there are none)
    pub fn average_persistence(&self) -> f64 {
        let finite: Vec<f64> = self
            .pairs
            .iter()
            .filter(|p| !p.is_essential())
            .map(|p| p.persistence())
            .collect();
        if finite.is_empty() {
            0.0
        } else {
            finite.iter().sum::<f64>() / finite.len() as f64
        }
    }
    /// Maximum persistence (excluding essential features)
    pub fn max_persistence(&self) -> f64 {
        self.pairs
            .iter()
            .filter(|p| !p.is_essential())
            .map(|p| p.persistence())
            .fold(0.0, f64::max)
    }
    /// Filter by minimum persistence threshold
    ///
    /// Essential features have infinite persistence and are therefore
    /// always kept.
    pub fn filter_by_persistence(&self, threshold: f64) -> Self {
        Self {
            pairs: self
                .pairs
                .iter()
                .filter(|p| p.persistence() >= threshold)
                .cloned()
                .collect(),
            max_dimension: self.max_dimension,
        }
    }
    /// Number of features of each dimension
    pub fn feature_counts(&self) -> Vec<usize> {
        let mut counts = vec![0; self.max_dimension + 1];
        for pair in &self.pairs {
            if pair.dimension <= self.max_dimension {
                counts[pair.dimension] += 1;
            }
        }
        counts
    }
    /// Number of essential features
    pub fn essential_count(&self) -> usize {
        self.pairs.iter().filter(|p| p.is_essential()).count()
    }
/// Persistence landscape at time t and level k
pub fn landscape(&self, dim: usize, t: f64, level: usize) -> f64 {
// Get all pairs of given dimension
let mut values: Vec<f64> = self
.pairs_of_dim(dim)
.filter_map(|p| {
if let Some(death) = p.death {
let mid = (p.birth + death) / 2.0;
let half_life = (death - p.birth) / 2.0;
if t >= p.birth && t <= death {
// Triangle function
let value = if t <= mid {
t - p.birth
} else {
death - t
};
Some(value)
} else {
None
}
} else {
// Essential feature - extends to infinity
if t >= p.birth {
Some(t - p.birth)
} else {
None
}
}
})
.collect();
// Sort descending
values.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
// Return k-th largest (0-indexed)
values.get(level).copied().unwrap_or(0.0)
}
    /// Bottleneck distance to another diagram (same dimension)
    ///
    /// NOTE(review): this matches each point independently to its nearest
    /// neighbor (or the diagonal) in both directions, allowing many-to-one
    /// matches — it approximates, rather than computes, the true optimal
    /// bijection distance.
    pub fn bottleneck_distance(&self, other: &PersistenceDiagram, dim: usize) -> f64 {
        let pairs_self: Vec<&BirthDeathPair> = self.pairs_of_dim(dim).collect();
        let pairs_other: Vec<&BirthDeathPair> = other.pairs_of_dim(dim).collect();
        // Simple approximation using greedy matching
        let mut max_dist = 0.0_f64;
        // For each pair in self, find closest in other
        for p1 in &pairs_self {
            let mut min_dist = f64::INFINITY;
            for p2 in &pairs_other {
                let dist = l_infinity_distance(p1, p2);
                min_dist = min_dist.min(dist);
            }
            // Also consider matching to diagonal
            let diag_dist = p1.persistence() / 2.0;
            min_dist = min_dist.min(diag_dist);
            max_dist = max_dist.max(min_dist);
        }
        // Vice versa
        for p2 in &pairs_other {
            let mut min_dist = f64::INFINITY;
            for p1 in &pairs_self {
                let dist = l_infinity_distance(p1, p2);
                min_dist = min_dist.min(dist);
            }
            let diag_dist = p2.persistence() / 2.0;
            min_dist = min_dist.min(diag_dist);
            max_dist = max_dist.max(min_dist);
        }
        max_dist
    }
/// Wasserstein distance (q=2) to another diagram
pub fn wasserstein_distance(&self, other: &PersistenceDiagram, dim: usize) -> f64 {
let pairs_self: Vec<&BirthDeathPair> = self.pairs_of_dim(dim).collect();
let pairs_other: Vec<&BirthDeathPair> = other.pairs_of_dim(dim).collect();
// Use greedy approximation
let n = pairs_self.len();
let m = pairs_other.len();
if n == 0 && m == 0 {
return 0.0;
}
let mut total = 0.0;
// Sum of squared persistence for unmatched points (to diagonal)
for p in &pairs_self {
if !p.is_essential() {
let diag_dist = p.persistence() / 2.0;
total += diag_dist * diag_dist;
}
}
for p in &pairs_other {
if !p.is_essential() {
let diag_dist = p.persistence() / 2.0;
total += diag_dist * diag_dist;
}
}
total.sqrt()
}
}
impl Default for PersistenceDiagram {
fn default() -> Self {
Self::new()
}
}
/// L∞ (Chebyshev) distance between two persistence points.
///
/// Two essential points (both deathless) differ only in birth; an
/// essential point and a finite one are infinitely far apart.
fn l_infinity_distance(p1: &BirthDeathPair, p2: &BirthDeathPair) -> f64 {
    let db = (p1.birth - p2.birth).abs();
    let dd = if let (Some(d1), Some(d2)) = (p1.death, p2.death) {
        (d1 - d2).abs()
    } else if p1.death.is_none() && p2.death.is_none() {
        0.0
    } else {
        f64::INFINITY
    };
    db.max(dd)
}
/// Filtration: sequence of simplicial complexes, encoded as a flat list
/// of simplices tagged with the filtration value at which each first
/// appears. Use `sort` to order faces before cofaces and `complex_at(t)`
/// to materialize the complex at a given filtration value.
#[derive(Debug, Clone)]
pub struct Filtration {
    /// Simplices with their birth times (insertion order; not
    /// necessarily sorted until `sort` is called)
    pub simplices: Vec<FilteredSimplex>,
}
/// Simplex with birth time in filtration — one entry of a `Filtration`,
/// pairing a simplex with the filtration value at which it enters.
#[derive(Debug, Clone)]
pub struct FilteredSimplex {
    /// The simplex
    pub simplex: Simplex,
    /// Birth time (filtration value at which the simplex appears)
    pub birth: f64,
}
impl Filtration {
/// Create empty filtration
pub fn new() -> Self {
Self {
simplices: Vec::new(),
}
}
/// Add a simplex at given birth time
pub fn add(&mut self, simplex: Simplex, birth: f64) {
self.simplices.push(FilteredSimplex { simplex, birth });
}
/// Sort simplices by birth time, then by dimension
pub fn sort(&mut self) {
self.simplices.sort_by(|a, b| {
a.birth
.partial_cmp(&b.birth)
.unwrap_or(std::cmp::Ordering::Equal)
.then_with(|| a.simplex.dim().cmp(&b.simplex.dim()))
});
}
/// Get the simplicial complex at filtration value t
pub fn complex_at(&self, t: f64) -> SimplicialComplex {
SimplicialComplex::from_simplices(
self.simplices
.iter()
.filter(|fs| fs.birth <= t)
.map(|fs| fs.simplex.clone()),
)
}
}
impl Default for Filtration {
fn default() -> Self {
Self::new()
}
}
/// Vietoris-Rips filtration builder: constructs a filtration from a point
/// cloud in which a simplex is born when its longest edge appears
/// (birth = diameter / 2).
pub struct VietorisRipsFiltration {
    /// Maximum simplex dimension (builds up to tetrahedra for >= 3)
    pub max_dimension: usize,
    /// Maximum filtration value; edges longer than 2 * max_radius are dropped
    pub max_radius: f64,
}
impl VietorisRipsFiltration {
    /// Create a new VR filtration builder
    pub fn new(max_dimension: usize, max_radius: f64) -> Self {
        Self {
            max_dimension,
            max_radius,
        }
    }
    /// Build filtration from point cloud
    ///
    /// Edges are processed in ascending length, and a higher simplex is
    /// emitted the moment its last (longest) edge arrives; since `adj`
    /// only contains already-processed (shorter) edges, each simplex's
    /// birth value equals half its diameter, as VR requires. Assumes
    /// pairwise distances are distinct for exact tie behavior.
    pub fn build(&self, points: &[Vec<f64>]) -> Filtration {
        let n = points.len();
        let mut filtration = Filtration::new();
        // Add vertices at t=0
        for i in 0..n {
            filtration.add(Simplex::vertex(i), 0.0);
        }
        // Compute pairwise distances
        let mut edges: Vec<(usize, usize, f64)> = Vec::new();
        for i in 0..n {
            for j in (i + 1)..n {
                let dist = euclidean_distance(&points[i], &points[j]);
                if dist <= self.max_radius * 2.0 {
                    edges.push((i, j, dist));
                }
            }
        }
        // Sort edges by distance
        edges.sort_by(|a, b| a.2.partial_cmp(&b.2).unwrap_or(std::cmp::Ordering::Equal));
        // Build adjacency list
        let mut adj: Vec<HashSet<usize>> = vec![HashSet::new(); n];
        // Add edges and higher simplices
        for (i, j, dist) in edges {
            let birth = dist / 2.0; // Diameter is 2*radius
            // Add edge
            filtration.add(Simplex::edge(i, j), birth);
            adj[i].insert(j);
            adj[j].insert(i);
            if self.max_dimension >= 2 {
                // Find triangles: any k already adjacent to both endpoints
                // closes a triangle whose longest edge is (i, j).
                let common: Vec<usize> = adj[i]
                    .intersection(&adj[j])
                    .copied()
                    .collect();
                for k in common {
                    filtration.add(Simplex::triangle(i, j, k), birth);
                    if self.max_dimension >= 3 {
                        // Find tetrahedra: a fourth vertex l adjacent to
                        // i, j, and k. The l > k guard emits each
                        // tetrahedron once per closing edge.
                        let common_3: Vec<usize> = adj[i]
                            .intersection(&adj[j])
                            .filter(|&&l| adj[k].contains(&l) && l != k)
                            .copied()
                            .collect();
                        for l in common_3 {
                            if l > k {
                                filtration.add(Simplex::tetrahedron(i, j, k, l), birth);
                            }
                        }
                    }
                }
            }
        }
        filtration.sort();
        filtration
    }
}
/// Euclidean (L2) distance between two points given as coordinate slices.
/// If the slices differ in length, the extra coordinates of the longer
/// one are ignored (zip truncation), matching the original behavior.
fn euclidean_distance(a: &[f64], b: &[f64]) -> f64 {
    let mut sum_sq = 0.0_f64;
    for (x, y) in a.iter().zip(b) {
        let diff = x - y;
        sum_sq += diff * diff;
    }
    sum_sq.sqrt()
}
/// Persistent homology computation engine (standard Z/2 column-reduction
/// over a sorted filtration)
pub struct PersistentHomologyComputer {
    /// Maximum homological dimension to report in the output diagram
    max_dimension: usize,
}
impl PersistentHomologyComputer {
    /// Create a new computation engine
    pub fn new(max_dimension: usize) -> Self {
        Self { max_dimension }
    }
    /// Compute persistent homology from a filtration
    ///
    /// Implements the standard column-reduction algorithm over Z/2: each
    /// simplex contributes a boundary column, and columns are reduced left
    /// to right so that every surviving pivot pairs a "death" simplex with
    /// the "birth" simplex it kills. The filtration is assumed to already
    /// be sorted (faces before cofaces); call `Filtration::sort` first.
    pub fn compute(&self, filtration: &Filtration) -> PersistenceDiagram {
        let n = filtration.simplices.len();
        if n == 0 {
            return PersistenceDiagram::new();
        }
        // Build simplex to index mapping
        let simplex_to_idx: HashMap<&Simplex, usize> = filtration
            .simplices
            .iter()
            .enumerate()
            .map(|(i, fs)| (&fs.simplex, i))
            .collect();
        // Initialize columns (boundary chains)
        let mut columns: Vec<Option<HashSet<usize>>> = Vec::with_capacity(n);
        let mut birth_times = Vec::with_capacity(n);
        let mut dimensions = Vec::with_capacity(n);
        let mut simplices_vec = Vec::with_capacity(n);
        for fs in &filtration.simplices {
            birth_times.push(fs.birth);
            dimensions.push(fs.simplex.dim());
            simplices_vec.push(fs.simplex.clone());
            // Compute boundary; faces missing from the filtration are
            // silently dropped by filter_map.
            let boundary: HashSet<usize> = fs
                .simplex
                .boundary_faces()
                .into_iter()
                .filter_map(|(face, _sign)| simplex_to_idx.get(&face).copied())
                .collect();
            columns.push(if boundary.is_empty() {
                None
            } else {
                Some(boundary)
            });
        }
        // Reduce matrix using standard algorithm: repeatedly cancel a
        // column's pivot against the earlier column owning that pivot,
        // until the pivot is unique or the column vanishes (a cycle).
        let mut pivot_to_col: HashMap<usize, usize> = HashMap::new();
        for j in 0..n {
            while let Some(pivot) = get_pivot(&columns[j]) {
                if let Some(&other) = pivot_to_col.get(&pivot) {
                    // Add column 'other' to column j (mod 2)
                    add_columns(&mut columns, j, other);
                } else {
                    pivot_to_col.insert(pivot, j);
                    break;
                }
            }
        }
        // Extract persistence pairs; zero-persistence pairs (death == birth)
        // are marked paired but not reported.
        let mut diagram = PersistenceDiagram::new();
        let mut paired: HashSet<usize> = HashSet::new();
        for (&pivot, &col) in &pivot_to_col {
            let birth = birth_times[pivot];
            let death = birth_times[col];
            let dim = dimensions[pivot];
            if death > birth && dim <= self.max_dimension {
                diagram.add(BirthDeathPair::finite(
                    dim,
                    birth,
                    death,
                    Some(simplices_vec[pivot].clone()),
                    Some(simplices_vec[col].clone()),
                ));
            }
            paired.insert(pivot);
            paired.insert(col);
        }
        // Add essential features (unpaired simplices with zero boundary)
        for j in 0..n {
            if !paired.contains(&j) && columns[j].is_none() {
                let dim = dimensions[j];
                if dim <= self.max_dimension {
                    diagram.add(BirthDeathPair::essential(
                        dim,
                        birth_times[j],
                        Some(simplices_vec[j].clone()),
                    ));
                }
            }
        }
        diagram
    }
    /// Compute from point cloud
    ///
    /// Convenience wrapper: builds a Vietoris-Rips filtration up to
    /// `max_radius` and reduces it.
    pub fn compute_from_points(
        &self,
        points: &[Vec<f64>],
        max_radius: f64,
    ) -> PersistenceDiagram {
        let vr = VietorisRipsFiltration::new(self.max_dimension, max_radius);
        let filtration = vr.build(points);
        self.compute(&filtration)
    }
}
/// Pivot of a boundary column: its highest-indexed entry, or `None` when
/// the column is empty (a zero column, stored as `None`).
fn get_pivot(col: &Option<HashSet<usize>>) -> Option<usize> {
    match col {
        Some(entries) => entries.iter().max().copied(),
        None => None,
    }
}
/// XOR column `src` into column `dst` (Z/2 addition of boundary chains).
///
/// The destination becomes the symmetric difference of the two columns;
/// a column that cancels completely is stored as `None`. A `None` source
/// or destination leaves `dst` unchanged, matching the reduction
/// algorithm's invariants.
fn add_columns(columns: &mut [Option<HashSet<usize>>], dst: usize, src: usize) {
    // Clone the source so we can mutate `columns[dst]` without aliasing.
    let src_col = match columns[src].clone() {
        Some(c) => c,
        None => return,
    };
    if let Some(dst_col) = columns[dst].take() {
        let merged: HashSet<usize> = dst_col
            .symmetric_difference(&src_col)
            .copied()
            .collect();
        columns[dst] = if merged.is_empty() { None } else { Some(merged) };
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Basic invariants of finite vs essential birth-death pairs.
    #[test]
    fn test_birth_death_pair() {
        let finite = BirthDeathPair::finite(0, 0.0, 1.0, None, None);
        assert_eq!(finite.persistence(), 1.0);
        assert!(!finite.is_essential());
        assert!(finite.is_alive_at(0.5));
        assert!(!finite.is_alive_at(1.5));
        let essential = BirthDeathPair::essential(0, 0.0, None);
        assert!(essential.is_essential());
        assert_eq!(essential.persistence(), f64::INFINITY);
        assert!(essential.is_alive_at(1000.0));
    }
    #[test]
    fn test_persistence_diagram() {
        let mut diagram = PersistenceDiagram::new();
        diagram.add(BirthDeathPair::essential(0, 0.0, None));
        diagram.add(BirthDeathPair::finite(0, 0.0, 1.0, None, None));
        diagram.add(BirthDeathPair::finite(1, 0.5, 2.0, None, None));
        assert_eq!(diagram.pairs.len(), 3);
        assert_eq!(diagram.essential_count(), 1);
        // t = 0.75 lies inside all three intervals.
        let betti = diagram.betti_at(0.75);
        assert_eq!(betti[0], 2); // Both H0 features alive
        assert_eq!(betti[1], 1); // H1 feature alive
        assert!((diagram.total_persistence() - 2.5).abs() < 1e-10); // 1.0 + 1.5
    }
    // The sub-complex at t should contain only simplices born by t.
    #[test]
    fn test_filtration() {
        let mut filtration = Filtration::new();
        filtration.add(Simplex::vertex(0), 0.0);
        filtration.add(Simplex::vertex(1), 0.0);
        filtration.add(Simplex::edge(0, 1), 1.0);
        filtration.sort();
        let complex = filtration.complex_at(0.5);
        assert_eq!(complex.count(0), 2);
        assert_eq!(complex.count(1), 0);
        let complex = filtration.complex_at(1.5);
        assert_eq!(complex.count(1), 1);
    }
    #[test]
    fn test_persistent_homology_simple() {
        // Two points that merge
        let points = vec![vec![0.0, 0.0], vec![1.0, 0.0]];
        let computer = PersistentHomologyComputer::new(1);
        let diagram = computer.compute_from_points(&points, 1.0);
        // Should have:
        // - One essential H0 (final connected component)
        // - One finite H0 that dies when edge connects
        let h0_pairs: Vec<_> = diagram.pairs_of_dim(0).collect();
        assert!(h0_pairs.len() >= 1);
    }
    #[test]
    fn test_persistent_homology_triangle() {
        // Three points forming equilateral triangle (0.866 ≈ √3/2)
        let points = vec![
            vec![0.0, 0.0],
            vec![1.0, 0.0],
            vec![0.5, 0.866],
        ];
        let computer = PersistentHomologyComputer::new(2);
        let diagram = computer.compute_from_points(&points, 1.0);
        // Should have H0 and possibly H1 features
        assert!(!diagram.pairs.is_empty());
    }
    // Smoke test only: the greedy approximation has no exact expected value.
    #[test]
    fn test_bottleneck_distance() {
        let mut d1 = PersistenceDiagram::new();
        d1.add(BirthDeathPair::finite(0, 0.0, 1.0, None, None));
        let mut d2 = PersistenceDiagram::new();
        d2.add(BirthDeathPair::finite(0, 0.0, 2.0, None, None));
        let dist = d1.bottleneck_distance(&d2, 0);
        assert!(dist >= 0.0);
    }
    #[test]
    fn test_landscape() {
        let mut diagram = PersistenceDiagram::new();
        diagram.add(BirthDeathPair::finite(1, 0.0, 2.0, None, None));
        // At midpoint t=1, landscape should have maximum
        let val = diagram.landscape(1, 1.0, 0);
        assert!((val - 1.0).abs() < 1e-10);
        // At t=0 or t=2, landscape should be 0
        let val_0 = diagram.landscape(1, 0.0, 0);
        assert!(val_0.abs() < 1e-10);
    }
}

View File

@@ -0,0 +1,711 @@
//! Quantum Channels and Operations
//!
//! Implements quantum channels using Kraus operator representation,
//! Pauli operators, and common quantum operations.
use super::complex_matrix::{gates, Complex64, ComplexMatrix};
use super::density_matrix::DensityMatrix;
use super::{constants, QuantumTopologyError, Result};
/// Type of Pauli operator — one single-qubit factor of a Pauli string
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PauliType {
    /// Identity I
    I,
    /// Pauli X (bit flip)
    X,
    /// Pauli Y
    Y,
    /// Pauli Z (phase flip)
    Z,
}
impl PauliType {
    /// Dense 2×2 matrix representation of this Pauli.
    pub fn to_matrix(&self) -> ComplexMatrix {
        match self {
            Self::I => ComplexMatrix::identity(2),
            Self::X => gates::pauli_x(),
            Self::Y => gates::pauli_y(),
            Self::Z => gates::pauli_z(),
        }
    }
    /// Eigenvalues: {1, 1} for I, {1, -1} for X, Y, Z.
    pub fn eigenvalues(&self) -> [f64; 2] {
        let low = if *self == PauliType::I { 1.0 } else { -1.0 };
        [1.0, low]
    }
    /// Whether this Pauli commutes with `other`: the identity commutes
    /// with everything, and each Pauli with itself; distinct non-identity
    /// Paulis anticommute.
    pub fn commutes_with(&self, other: &PauliType) -> bool {
        *self == PauliType::I || *other == PauliType::I || self == other
    }
}
/// Pauli operator on multiple qubits: a tensor product of single-qubit
/// Paulis ("Pauli string") together with a global phase
#[derive(Debug, Clone, PartialEq)]
pub struct PauliOperator {
    /// Pauli types for each qubit (I, X, Y, or Z), in qubit order
    pub paulis: Vec<PauliType>,
    /// Overall phase factor (±1, ±i, accumulated by multiplication)
    pub phase: Complex64,
}
impl PauliOperator {
    /// Build a phase-free (+1) Pauli string from per-qubit factors.
    pub fn new(paulis: Vec<PauliType>) -> Self {
        Self::with_phase(paulis, Complex64::new(1.0, 0.0))
    }
    /// Build a Pauli string carrying an explicit global phase.
    pub fn with_phase(paulis: Vec<PauliType>, phase: Complex64) -> Self {
        Self { paulis, phase }
    }
    /// Identity string I⊗…⊗I on `num_qubits` qubits.
    pub fn identity(num_qubits: usize) -> Self {
        Self::new(vec![PauliType::I; num_qubits])
    }
    /// String acting with `pauli` on qubit `target` and identity
    /// elsewhere; an out-of-range target yields the identity string.
    pub fn single_qubit(num_qubits: usize, target: usize, pauli: PauliType) -> Self {
        let mut factors = vec![PauliType::I; num_qubits];
        if let Some(slot) = factors.get_mut(target) {
            *slot = pauli;
        }
        Self::new(factors)
    }
    /// Number of qubits the string acts on.
    pub fn num_qubits(&self) -> usize {
        self.paulis.len()
    }
    /// True when every factor is I and the phase equals +1.
    pub fn is_identity(&self) -> bool {
        let phase_is_one = (self.phase.re - 1.0).abs() < constants::EPSILON
            && self.phase.im.abs() < constants::EPSILON;
        phase_is_one && self.paulis.iter().all(|&p| p == PauliType::I)
    }
    /// Dense matrix representation: the Kronecker product of all factors
    /// scaled by the global phase. An empty string is the scaled 1×1
    /// identity.
    pub fn to_matrix(&self) -> ComplexMatrix {
        match self.paulis.split_first() {
            None => ComplexMatrix::identity(1).scale(self.phase),
            Some((head, tail)) => {
                let mut acc = head.to_matrix();
                for p in tail {
                    acc = acc.tensor(&p.to_matrix());
                }
                acc.scale(self.phase)
            }
        }
    }
    /// Product of two Pauli strings of the same width, accumulating the
    /// ±i phases produced qubit by qubit.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when the qubit counts differ.
    pub fn multiply(&self, other: &PauliOperator) -> Result<Self> {
        if self.num_qubits() != other.num_qubits() {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.num_qubits(),
                got: other.num_qubits(),
            });
        }
        let mut phase = self.phase * other.phase;
        let factors: Vec<PauliType> = self
            .paulis
            .iter()
            .zip(&other.paulis)
            .map(|(a, b)| {
                let (product, local) = multiply_single_paulis(*a, *b);
                phase *= local;
                product
            })
            .collect();
        Ok(Self::with_phase(factors, phase))
    }
    /// Whether two Pauli strings commute: true iff the count of
    /// anticommuting single-qubit factors is even. Mismatched widths
    /// never commute.
    pub fn commutes_with(&self, other: &PauliOperator) -> bool {
        if self.num_qubits() != other.num_qubits() {
            return false;
        }
        let anticommuting = self
            .paulis
            .iter()
            .copied()
            .zip(other.paulis.iter().copied())
            .filter(|(a, b)| !a.commutes_with(b))
            .count();
        anticommuting % 2 == 0
    }
    /// Weight: the number of non-identity factors.
    pub fn weight(&self) -> usize {
        self.paulis.iter().filter(|&&p| p != PauliType::I).count()
    }
    /// Support: the qubit indices carrying non-identity factors.
    pub fn support(&self) -> Vec<usize> {
        self.paulis
            .iter()
            .enumerate()
            .filter(|&(_, &p)| p != PauliType::I)
            .map(|(i, _)| i)
            .collect()
    }
}
/// Multiply two single-qubit Paulis and return the result with phase
fn multiply_single_paulis(p1: PauliType, p2: PauliType) -> (PauliType, Complex64) {
use PauliType::*;
let i = Complex64::new(0.0, 1.0);
let mi = Complex64::new(0.0, -1.0);
let one = Complex64::new(1.0, 0.0);
match (p1, p2) {
(I, p) | (p, I) => (p, one),
(X, X) | (Y, Y) | (Z, Z) => (I, one),
(X, Y) => (Z, i),
(Y, X) => (Z, mi),
(Y, Z) => (X, i),
(Z, Y) => (X, mi),
(Z, X) => (Y, i),
(X, Z) => (Y, mi),
}
}
/// Kraus operator for quantum channels — one term K_i of a channel's
/// operator-sum representation ρ → Σ K_i ρ K_i†
#[derive(Debug, Clone)]
pub struct KrausOperator {
    /// The Kraus operator matrix K_i
    pub matrix: ComplexMatrix,
    /// Optional human-readable label (e.g. "X" for a Pauli-X error term)
    pub label: Option<String>,
}
impl KrausOperator {
    /// Wrap a matrix as an unlabeled Kraus operator.
    pub fn new(matrix: ComplexMatrix) -> Self {
        Self {
            matrix,
            label: None,
        }
    }
    /// Wrap a matrix as a Kraus operator carrying a descriptive label.
    pub fn with_label(matrix: ComplexMatrix, label: &str) -> Self {
        let label = Some(label.to_string());
        Self { matrix, label }
    }
    /// Row (output) dimension of the operator matrix.
    pub fn dimension(&self) -> usize {
        self.matrix.rows
    }
}
/// Quantum channel represented by Kraus operators (operator-sum form).
/// A trace-preserving channel satisfies Σ K_i† K_i = I on the input space.
#[derive(Debug, Clone)]
pub struct QuantumChannel {
    /// Kraus operators {K_i} such that Σ K_i† K_i = I
    pub kraus_operators: Vec<KrausOperator>,
    /// Input dimension (columns of each Kraus operator)
    pub input_dim: usize,
    /// Output dimension (rows of each Kraus operator)
    pub output_dim: usize,
}
impl QuantumChannel {
    /// Create a new quantum channel from Kraus operators
    ///
    /// Dimensions are inferred from the first operator and enforced on the
    /// rest; the completeness relation Σ K_i† K_i = I is verified with a
    /// tolerance of 100·EPSILON, so only (numerically) trace-preserving
    /// channels are accepted.
    pub fn new(operators: Vec<ComplexMatrix>) -> Result<Self> {
        if operators.is_empty() {
            return Err(QuantumTopologyError::InvalidQuantumChannel(
                "Channel must have at least one Kraus operator".to_string(),
            ));
        }
        let input_dim = operators[0].cols;
        let output_dim = operators[0].rows;
        // Verify dimensions match
        for op in &operators {
            if op.cols != input_dim || op.rows != output_dim {
                return Err(QuantumTopologyError::InvalidQuantumChannel(
                    "All Kraus operators must have the same dimensions".to_string(),
                ));
            }
        }
        let kraus_operators = operators.into_iter().map(KrausOperator::new).collect();
        let channel = Self {
            kraus_operators,
            input_dim,
            output_dim,
        };
        // Verify completeness (Σ K_i† K_i = I)
        channel.verify_completeness(constants::EPSILON * 100.0)?;
        Ok(channel)
    }
    /// Create without validation
    ///
    /// Skips the completeness check; the caller is responsible for the
    /// operators forming a valid channel. An empty operator list yields
    /// dimensions of 1.
    pub fn new_unchecked(operators: Vec<ComplexMatrix>) -> Self {
        let input_dim = operators.first().map(|m| m.cols).unwrap_or(1);
        let output_dim = operators.first().map(|m| m.rows).unwrap_or(1);
        Self {
            kraus_operators: operators.into_iter().map(KrausOperator::new).collect(),
            input_dim,
            output_dim,
        }
    }
    /// Verify that the Kraus operators satisfy the completeness relation
    /// Σ K_i† K_i = I, entrywise within `tolerance`
    fn verify_completeness(&self, tolerance: f64) -> Result<()> {
        let mut sum = ComplexMatrix::zeros(self.input_dim, self.input_dim);
        for k in &self.kraus_operators {
            let k_dag_k = k.matrix.adjoint().matmul(&k.matrix);
            sum = sum.add(&k_dag_k);
        }
        // Check if sum ≈ I
        let identity = ComplexMatrix::identity(self.input_dim);
        for i in 0..self.input_dim {
            for j in 0..self.input_dim {
                let diff = (sum.get(i, j) - identity.get(i, j)).norm();
                if diff > tolerance {
                    return Err(QuantumTopologyError::InvalidQuantumChannel(format!(
                        "Completeness relation violated: diff = {} at ({}, {})",
                        diff, i, j
                    )));
                }
            }
        }
        Ok(())
    }
    /// Apply the channel to a density matrix: ρ → Σ K_i ρ K_i†
    ///
    /// # Errors
    /// Returns `DimensionMismatch` unless ρ lives on the input space.
    pub fn apply(&self, rho: &DensityMatrix) -> Result<DensityMatrix> {
        if rho.dimension() != self.input_dim {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.input_dim,
                got: rho.dimension(),
            });
        }
        let mut result = ComplexMatrix::zeros(self.output_dim, self.output_dim);
        for k in &self.kraus_operators {
            let k_rho = k.matrix.matmul(&rho.matrix);
            let k_rho_kdag = k_rho.matmul(&k.matrix.adjoint());
            result = result.add(&k_rho_kdag);
        }
        Ok(DensityMatrix::new_unchecked(result))
    }
    /// Compose two channels: (E ∘ F)(ρ) = E(F(ρ))
    ///
    /// The Kraus set of the composition is the pairwise product set
    /// {E_i F_j}, so the operator count grows multiplicatively.
    pub fn compose(&self, other: &QuantumChannel) -> Result<Self> {
        if self.input_dim != other.output_dim {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.input_dim,
                got: other.output_dim,
            });
        }
        // Compose Kraus operators: {E_i F_j}
        let mut new_operators = Vec::with_capacity(
            self.kraus_operators.len() * other.kraus_operators.len(),
        );
        for e in &self.kraus_operators {
            for f in &other.kraus_operators {
                new_operators.push(e.matrix.matmul(&f.matrix));
            }
        }
        Ok(Self::new_unchecked(new_operators))
    }
    /// Tensor product of channels: (E ⊗ F)(ρ_AB) = E(ρ_A) ⊗ F(ρ_B)
    pub fn tensor(&self, other: &QuantumChannel) -> Self {
        let mut new_operators = Vec::with_capacity(
            self.kraus_operators.len() * other.kraus_operators.len(),
        );
        for k1 in &self.kraus_operators {
            for k2 in &other.kraus_operators {
                new_operators.push(k1.matrix.tensor(&k2.matrix));
            }
        }
        Self {
            kraus_operators: new_operators.into_iter().map(KrausOperator::new).collect(),
            input_dim: self.input_dim * other.input_dim,
            output_dim: self.output_dim * other.output_dim,
        }
    }
    /// Create the identity channel (single identity Kraus operator)
    pub fn identity(dim: usize) -> Self {
        Self {
            kraus_operators: vec![KrausOperator::new(ComplexMatrix::identity(dim))],
            input_dim: dim,
            output_dim: dim,
        }
    }
    /// Create a unitary channel (single Kraus operator)
    ///
    /// # Errors
    /// Returns `InvalidQuantumChannel` when `u` is not unitary within
    /// 100·EPSILON.
    pub fn unitary(u: ComplexMatrix) -> Result<Self> {
        if !u.is_unitary(constants::EPSILON * 100.0) {
            return Err(QuantumTopologyError::InvalidQuantumChannel(
                "Matrix is not unitary".to_string(),
            ));
        }
        let dim = u.rows;
        Ok(Self {
            kraus_operators: vec![KrausOperator::new(u)],
            input_dim: dim,
            output_dim: dim,
        })
    }
    /// Create the depolarizing channel with probability p
    /// ρ → (1-p)ρ + (p/3)(XρX + YρY + ZρZ)
    ///
    /// `p` is assumed to lie in [0, 1]; values outside that range make the
    /// square roots below NaN.
    pub fn depolarizing(p: f64) -> Self {
        let sqrt_1_p = (1.0 - p).sqrt();
        let sqrt_p_3 = (p / 3.0).sqrt();
        let k0 = ComplexMatrix::identity(2).scale(Complex64::new(sqrt_1_p, 0.0));
        let k1 = gates::pauli_x().scale(Complex64::new(sqrt_p_3, 0.0));
        let k2 = gates::pauli_y().scale(Complex64::new(sqrt_p_3, 0.0));
        let k3 = gates::pauli_z().scale(Complex64::new(sqrt_p_3, 0.0));
        Self {
            kraus_operators: vec![
                KrausOperator::with_label(k0, "I"),
                KrausOperator::with_label(k1, "X"),
                KrausOperator::with_label(k2, "Y"),
                KrausOperator::with_label(k3, "Z"),
            ],
            input_dim: 2,
            output_dim: 2,
        }
    }
    /// Create the amplitude damping channel with damping parameter γ
    /// Models energy dissipation to environment (γ assumed in [0, 1])
    pub fn amplitude_damping(gamma: f64) -> Self {
        let sqrt_gamma = gamma.sqrt();
        let sqrt_1_gamma = (1.0 - gamma).sqrt();
        // K0 = diag(1, √(1-γ)): leaves |0⟩ alone, attenuates |1⟩
        let k0 = ComplexMatrix::new(
            vec![
                Complex64::new(1.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(sqrt_1_gamma, 0.0),
            ],
            2,
            2,
        );
        // K1 = √γ |0⟩⟨1|: the decay (jump) operator
        let k1 = ComplexMatrix::new(
            vec![
                Complex64::new(0.0, 0.0),
                Complex64::new(sqrt_gamma, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(0.0, 0.0),
            ],
            2,
            2,
        );
        Self {
            kraus_operators: vec![
                KrausOperator::with_label(k0, "K0"),
                KrausOperator::with_label(k1, "K1"),
            ],
            input_dim: 2,
            output_dim: 2,
        }
    }
    /// Create the phase damping (dephasing) channel with parameter γ
    /// (γ assumed in [0, 1]; destroys coherences, preserves populations)
    pub fn phase_damping(gamma: f64) -> Self {
        let sqrt_1_gamma = (1.0 - gamma).sqrt();
        let sqrt_gamma = gamma.sqrt();
        let k0 = ComplexMatrix::new(
            vec![
                Complex64::new(1.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(sqrt_1_gamma, 0.0),
            ],
            2,
            2,
        );
        // K1 = √γ |1⟩⟨1|: phase-scattering term, no population transfer
        let k1 = ComplexMatrix::new(
            vec![
                Complex64::new(0.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(sqrt_gamma, 0.0),
            ],
            2,
            2,
        );
        Self {
            kraus_operators: vec![
                KrausOperator::with_label(k0, "K0"),
                KrausOperator::with_label(k1, "K1"),
            ],
            input_dim: 2,
            output_dim: 2,
        }
    }
    /// Create a bit-flip channel with probability p (applies X with prob p)
    pub fn bit_flip(p: f64) -> Self {
        let sqrt_1_p = (1.0 - p).sqrt();
        let sqrt_p = p.sqrt();
        let k0 = ComplexMatrix::identity(2).scale(Complex64::new(sqrt_1_p, 0.0));
        let k1 = gates::pauli_x().scale(Complex64::new(sqrt_p, 0.0));
        Self {
            kraus_operators: vec![
                KrausOperator::with_label(k0, "I"),
                KrausOperator::with_label(k1, "X"),
            ],
            input_dim: 2,
            output_dim: 2,
        }
    }
    /// Create a phase-flip channel with probability p (applies Z with prob p)
    pub fn phase_flip(p: f64) -> Self {
        let sqrt_1_p = (1.0 - p).sqrt();
        let sqrt_p = p.sqrt();
        let k0 = ComplexMatrix::identity(2).scale(Complex64::new(sqrt_1_p, 0.0));
        let k1 = gates::pauli_z().scale(Complex64::new(sqrt_p, 0.0));
        Self {
            kraus_operators: vec![
                KrausOperator::with_label(k0, "I"),
                KrausOperator::with_label(k1, "Z"),
            ],
            input_dim: 2,
            output_dim: 2,
        }
    }
    /// Compute the Choi matrix (channel-state duality)
    /// J(E) = (I ⊗ E)(|Ω⟩⟨Ω|) where |Ω⟩ = Σ|ii⟩/√d
    ///
    /// NOTE(review): all four indices below range over `input_dim` only,
    /// so this assumes a square channel (input_dim == output_dim) — every
    /// constructor here builds square channels, but confirm before using
    /// with non-square Kraus sets.
    pub fn choi_matrix(&self) -> ComplexMatrix {
        let d = self.input_dim;
        let d2 = d * d;
        let mut choi = ComplexMatrix::zeros(d2, d2);
        // Build Choi matrix from Kraus operators
        for k in &self.kraus_operators {
            // Vectorize the Kraus operator using column stacking
            for i in 0..d {
                for j in 0..d {
                    for m in 0..d {
                        for n in 0..d {
                            let row = i * d + m;
                            let col = j * d + n;
                            let val = k.matrix.get(i, j) * k.matrix.get(m, n).conj();
                            let current = choi.get(row, col);
                            choi.set(row, col, current + val);
                        }
                    }
                }
            }
        }
        choi
    }
    /// Check if the channel is completely positive (always true for Kraus form)
    pub fn is_completely_positive(&self) -> bool {
        true // Kraus representation guarantees CP
    }
    /// Check if the channel is trace-preserving, i.e. Σ K_i† K_i ≈ I
    /// entrywise within `tolerance`
    pub fn is_trace_preserving(&self, tolerance: f64) -> bool {
        let mut sum = ComplexMatrix::zeros(self.input_dim, self.input_dim);
        for k in &self.kraus_operators {
            let k_dag_k = k.matrix.adjoint().matmul(&k.matrix);
            sum = sum.add(&k_dag_k);
        }
        let identity = ComplexMatrix::identity(self.input_dim);
        for i in 0..self.input_dim {
            for j in 0..self.input_dim {
                if (sum.get(i, j) - identity.get(i, j)).norm() > tolerance {
                    return false;
                }
            }
        }
        true
    }
    /// Compute the diamond norm distance to another channel (approximation)
    /// ||E - F||_◇ = max_{ρ} ||((E-F)⊗I)(ρ)||_1
    ///
    /// Approximated here by the trace norm of the Choi-matrix difference
    /// (eigenvalue absolute values summed), not an exact diamond norm.
    pub fn diamond_distance(&self, other: &QuantumChannel) -> Result<f64> {
        if self.input_dim != other.input_dim || self.output_dim != other.output_dim {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.input_dim,
                got: other.input_dim,
            });
        }
        // Use Choi matrix distance as approximation
        let choi_self = self.choi_matrix();
        let choi_other = other.choi_matrix();
        let diff = choi_self.sub(&choi_other);
        // Trace norm of difference (100 iterations, 1e-10 tolerance)
        let eigenvalues = diff.eigenvalues(100, 1e-10);
        let trace_norm: f64 = eigenvalues.iter().map(|ev| ev.norm()).sum();
        Ok(trace_norm)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // XY = iZ and YX = -iZ: the cyclic sign rule of Pauli products.
    #[test]
    fn test_pauli_multiplication() {
        let (result, phase) = multiply_single_paulis(PauliType::X, PauliType::Y);
        assert_eq!(result, PauliType::Z);
        assert!((phase.im - 1.0).abs() < 1e-10); // i
        let (result, phase) = multiply_single_paulis(PauliType::Y, PauliType::X);
        assert_eq!(result, PauliType::Z);
        assert!((phase.im + 1.0).abs() < 1e-10); // -i
    }
    #[test]
    fn test_pauli_operator() {
        let pauli = PauliOperator::new(vec![PauliType::X, PauliType::Z]);
        assert_eq!(pauli.weight(), 2);
        assert_eq!(pauli.support(), vec![0, 1]);
        // Two qubits tensor into a 4x4 matrix.
        let matrix = pauli.to_matrix();
        assert_eq!(matrix.rows, 4);
    }
    #[test]
    fn test_pauli_commutation() {
        let p1 = PauliOperator::new(vec![PauliType::X, PauliType::I]);
        let p2 = PauliOperator::new(vec![PauliType::I, PauliType::X]);
        // Should commute (act on different qubits)
        assert!(p1.commutes_with(&p2));
        let p3 = PauliOperator::new(vec![PauliType::X, PauliType::I]);
        let p4 = PauliOperator::new(vec![PauliType::Z, PauliType::I]);
        // X and Z anticommute
        assert!(!p3.commutes_with(&p4));
    }
    // The identity channel must leave purity unchanged.
    #[test]
    fn test_identity_channel() {
        let channel = QuantumChannel::identity(2);
        let rho = DensityMatrix::maximally_mixed(2);
        let result = channel.apply(&rho).unwrap();
        assert!((result.purity() - rho.purity()).abs() < 1e-10);
    }
    #[test]
    fn test_depolarizing_channel() {
        let channel = QuantumChannel::depolarizing(0.0);
        assert!(channel.is_trace_preserving(1e-10));
        // p=0 should be identity
        let rho = DensityMatrix::from_pure_state(&super::super::quantum_state::QuantumState::ground_state(1));
        let result = channel.apply(&rho).unwrap();
        assert!((result.fidelity(&rho).unwrap() - 1.0).abs() < 1e-5);
    }
    #[test]
    fn test_amplitude_damping() {
        let channel = QuantumChannel::amplitude_damping(1.0);
        assert!(channel.is_trace_preserving(1e-10));
        // γ=1 should map everything to |0⟩
        let rho = DensityMatrix::from_pure_state(&super::super::quantum_state::QuantumState::basis_state(2, 1).unwrap());
        let result = channel.apply(&rho).unwrap();
        // Should be close to |0⟩⟨0|
        assert!((result.matrix.get(0, 0).re - 1.0).abs() < 1e-10);
    }
    // Composition of trace-preserving channels stays trace-preserving.
    #[test]
    fn test_channel_composition() {
        let c1 = QuantumChannel::bit_flip(0.1);
        let c2 = QuantumChannel::phase_flip(0.1);
        let composed = c1.compose(&c2).unwrap();
        assert!(composed.is_trace_preserving(1e-10));
    }
    #[test]
    fn test_channel_tensor() {
        let c1 = QuantumChannel::identity(2);
        let c2 = QuantumChannel::depolarizing(0.1);
        let tensor = c1.tensor(&c2);
        assert_eq!(tensor.input_dim, 4);
        assert!(tensor.is_trace_preserving(1e-10));
    }
    #[test]
    fn test_choi_matrix() {
        let channel = QuantumChannel::identity(2);
        let choi = channel.choi_matrix();
        // Choi matrix of identity channel on d-dim space has trace d
        assert_eq!(choi.rows, 4);
        // For trace-preserving channel, trace(Choi) = input_dim
        // The trace here depends on the specific implementation
        assert!(choi.trace().re > 0.0);
    }
}

View File

@@ -0,0 +1,674 @@
//! Quantum State Representation
//!
//! Pure quantum states represented as normalized complex vectors in Hilbert space.
use super::complex_matrix::{gates, Complex64, ComplexMatrix, ComplexVector};
use super::{constants, QuantumTopologyError, Result};
/// Computational basis for qubits — selects which single-qubit
/// measurement basis a caller is referring to
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum QuantumBasis {
    /// Computational basis |0⟩, |1⟩
    Computational,
    /// Hadamard basis |+⟩, |-⟩
    Hadamard,
    /// Circular basis |R⟩, |L⟩
    Circular,
}
/// Single qubit state |ψ⟩ = α|0⟩ + β|1⟩ with |α|² + |β|² = 1
/// (constructors normalize automatically)
#[derive(Debug, Clone)]
pub struct Qubit {
    /// Amplitude for |0⟩
    pub alpha: Complex64,
    /// Amplitude for |1⟩
    pub beta: Complex64,
}
impl Qubit {
    /// Create a new qubit state |ψ⟩ = α|0⟩ + β|1⟩.
    ///
    /// The amplitudes are normalized automatically; if both are (near)
    /// zero, the state defaults to |0⟩ instead of producing NaNs.
    pub fn new(alpha: Complex64, beta: Complex64) -> Self {
        let norm = (alpha.norm_sqr() + beta.norm_sqr()).sqrt();
        if norm < constants::EPSILON {
            // Default to |0⟩ if both amplitudes are zero
            Self {
                alpha: Complex64::new(1.0, 0.0),
                beta: Complex64::new(0.0, 0.0),
            }
        } else {
            Self {
                alpha: alpha / norm,
                beta: beta / norm,
            }
        }
    }
    /// Create |0⟩ state
    pub fn zero() -> Self {
        Self {
            alpha: Complex64::new(1.0, 0.0),
            beta: Complex64::new(0.0, 0.0),
        }
    }
    /// Create |1⟩ state
    pub fn one() -> Self {
        Self {
            alpha: Complex64::new(0.0, 0.0),
            beta: Complex64::new(1.0, 0.0),
        }
    }
    /// Create |+⟩ = (|0⟩ + |1⟩)/√2 state
    pub fn plus() -> Self {
        let s = 1.0 / 2.0_f64.sqrt();
        Self {
            alpha: Complex64::new(s, 0.0),
            beta: Complex64::new(s, 0.0),
        }
    }
    /// Create |-⟩ = (|0⟩ - |1⟩)/√2 state
    pub fn minus() -> Self {
        let s = 1.0 / 2.0_f64.sqrt();
        Self {
            alpha: Complex64::new(s, 0.0),
            beta: Complex64::new(-s, 0.0),
        }
    }
    /// Create a state from Bloch sphere coordinates (θ, φ)
    /// |ψ⟩ = cos(θ/2)|0⟩ + e^{iφ}sin(θ/2)|1⟩
    pub fn from_bloch(theta: f64, phi: f64) -> Self {
        let alpha = Complex64::new((theta / 2.0).cos(), 0.0);
        let beta = Complex64::from_polar((theta / 2.0).sin(), phi);
        Self { alpha, beta }
    }
    /// Get Bloch sphere coordinates (θ, φ)
    ///
    /// θ = 2·acos(|α|), with |α| clamped to 1 so normalization rounding
    /// cannot push `acos` outside its domain. φ is the relative phase
    /// arg(β/α); it is 0 when β vanishes, and arg(β) when α vanishes
    /// (the |1⟩ pole). The α ≈ 0 guard fixes the previous revision, which
    /// divided β by a zero amplitude and yielded NaN there.
    pub fn to_bloch(&self) -> (f64, f64) {
        let theta = 2.0 * self.alpha.norm().min(1.0).acos();
        let phi = if self.beta.norm() < constants::EPSILON {
            0.0
        } else if self.alpha.norm() < constants::EPSILON {
            // β/α is ill-defined at the south pole; arg(β) carries the phase.
            self.beta.arg()
        } else {
            (self.beta / self.alpha).arg()
        };
        (theta, phi)
    }
    /// Probability of measuring |0⟩
    pub fn prob_zero(&self) -> f64 {
        self.alpha.norm_sqr()
    }
    /// Probability of measuring |1⟩
    pub fn prob_one(&self) -> f64 {
        self.beta.norm_sqr()
    }
    /// Convert to a ComplexVector representation
    pub fn to_vector(&self) -> ComplexVector {
        ComplexVector::new(vec![self.alpha, self.beta])
    }
    /// Apply a single-qubit gate (matrix-vector product, renormalized)
    ///
    /// # Errors
    /// Returns `DimensionMismatch` unless `gate` is 2×2.
    pub fn apply_gate(&self, gate: &ComplexMatrix) -> Result<Self> {
        if gate.rows != 2 || gate.cols != 2 {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: 2,
                got: gate.rows,
            });
        }
        let new_alpha = gate.get(0, 0) * self.alpha + gate.get(0, 1) * self.beta;
        let new_beta = gate.get(1, 0) * self.alpha + gate.get(1, 1) * self.beta;
        Ok(Self::new(new_alpha, new_beta))
    }
    /// Apply Hadamard gate
    pub fn hadamard(&self) -> Self {
        // Cannot fail: the Hadamard gate is always 2×2.
        self.apply_gate(&gates::hadamard()).unwrap()
    }
    /// Apply Pauli X gate (NOT): swaps the |0⟩ and |1⟩ amplitudes
    pub fn pauli_x(&self) -> Self {
        Self::new(self.beta, self.alpha)
    }
    /// Apply Pauli Y gate
    pub fn pauli_y(&self) -> Self {
        let i = Complex64::new(0.0, 1.0);
        Self::new(-i * self.beta, i * self.alpha)
    }
    /// Apply Pauli Z gate: flips the sign of the |1⟩ amplitude
    pub fn pauli_z(&self) -> Self {
        Self::new(self.alpha, -self.beta)
    }
    /// Compute inner product ⟨self|other⟩
    pub fn inner(&self, other: &Qubit) -> Complex64 {
        self.alpha.conj() * other.alpha + self.beta.conj() * other.beta
    }
    /// Compute fidelity |⟨self|other⟩|²
    pub fn fidelity(&self, other: &Qubit) -> f64 {
        self.inner(other).norm_sqr()
    }
}
/// N-qubit quantum state (pure state)
///
/// Amplitudes are indexed little-endian: bit `k` of a basis index is the
/// value of qubit `k` (this matches `measure_qubit`'s `(i >> target) & 1`
/// and `w_state`'s `1 << i` indexing).
#[derive(Debug, Clone)]
pub struct QuantumState {
    /// State amplitudes in computational basis
    pub amplitudes: Vec<Complex64>,
    /// Hilbert space dimension (2^n for n qubits)
    pub dimension: usize,
}
/// Operations on pure multi-qubit states: standard-state constructors,
/// operator application, measurement, tensor products, and entanglement
/// measures.
impl QuantumState {
    /// Create a new quantum state from amplitudes
    /// Normalizes the state automatically
    ///
    /// # Errors
    /// Fails when the amplitude vector is empty or its length is not a
    /// power of two (i.e. not an n-qubit register).
    pub fn new(amplitudes: Vec<Complex64>) -> Result<Self> {
        if amplitudes.is_empty() {
            return Err(QuantumTopologyError::InvalidQuantumState(
                "Empty amplitude vector".to_string(),
            ));
        }
        // Check if dimension is a power of 2
        // (a power of two has exactly one set bit, so d & (d - 1) == 0)
        let dimension = amplitudes.len();
        if dimension != 1 && (dimension & (dimension - 1)) != 0 {
            return Err(QuantumTopologyError::InvalidQuantumState(
                format!("Dimension {} is not a power of 2", dimension),
            ));
        }
        let mut state = Self {
            amplitudes,
            dimension,
        };
        state.normalize();
        Ok(state)
    }
    /// Create a quantum state without dimension check (for non-qubit systems)
    pub fn new_unchecked(amplitudes: Vec<Complex64>) -> Self {
        let dimension = amplitudes.len();
        let mut state = Self {
            amplitudes,
            dimension,
        };
        state.normalize();
        state
    }
    /// Create computational basis state |i⟩
    ///
    /// # Errors
    /// Fails when `index >= dimension`.
    pub fn basis_state(dimension: usize, index: usize) -> Result<Self> {
        if index >= dimension {
            return Err(QuantumTopologyError::InvalidQuantumState(
                format!("Index {} out of bounds for dimension {}", index, dimension),
            ));
        }
        let mut amplitudes = vec![Complex64::new(0.0, 0.0); dimension];
        amplitudes[index] = Complex64::new(1.0, 0.0);
        Ok(Self {
            amplitudes,
            dimension,
        })
    }
    /// Create the ground state |0...0⟩ for n qubits
    pub fn ground_state(num_qubits: usize) -> Self {
        let dimension = 1 << num_qubits;
        let mut amplitudes = vec![Complex64::new(0.0, 0.0); dimension];
        amplitudes[0] = Complex64::new(1.0, 0.0);
        Self {
            amplitudes,
            dimension,
        }
    }
    /// Create a uniform superposition state
    /// (every basis state gets real amplitude 1/√dim)
    pub fn uniform_superposition(num_qubits: usize) -> Self {
        let dimension = 1 << num_qubits;
        let amplitude = Complex64::new(1.0 / (dimension as f64).sqrt(), 0.0);
        let amplitudes = vec![amplitude; dimension];
        Self {
            amplitudes,
            dimension,
        }
    }
    /// Create a GHZ state (|0...0⟩ + |1...1⟩)/√2
    pub fn ghz_state(num_qubits: usize) -> Self {
        let dimension = 1 << num_qubits;
        let mut amplitudes = vec![Complex64::new(0.0, 0.0); dimension];
        let amplitude = Complex64::new(1.0 / 2.0_f64.sqrt(), 0.0);
        // Only the all-zeros and all-ones basis states carry amplitude.
        amplitudes[0] = amplitude;
        amplitudes[dimension - 1] = amplitude;
        Self {
            amplitudes,
            dimension,
        }
    }
    /// Create a W state (|10...0⟩ + |01...0⟩ + ... + |0...01⟩)/√n
    pub fn w_state(num_qubits: usize) -> Self {
        let dimension = 1 << num_qubits;
        let mut amplitudes = vec![Complex64::new(0.0, 0.0); dimension];
        let amplitude = Complex64::new(1.0 / (num_qubits as f64).sqrt(), 0.0);
        // Basis index 1 << i is the state with only qubit i set to 1.
        for i in 0..num_qubits {
            amplitudes[1 << i] = amplitude;
        }
        Self {
            amplitudes,
            dimension,
        }
    }
    /// Number of qubits in the system
    // f64 log2 is exact for the power-of-two dimensions this type maintains.
    pub fn num_qubits(&self) -> usize {
        (self.dimension as f64).log2() as usize
    }
    /// Normalize the state in place
    // A (near-)zero vector is left untouched to avoid dividing by zero.
    pub fn normalize(&mut self) {
        let norm: f64 = self.amplitudes.iter().map(|c| c.norm_sqr()).sum::<f64>().sqrt();
        if norm > constants::EPSILON {
            for c in &mut self.amplitudes {
                *c /= norm;
            }
        }
    }
    /// Get the norm of the state vector (1.0 for a normalized state)
    pub fn norm(&self) -> f64 {
        self.amplitudes.iter().map(|c| c.norm_sqr()).sum::<f64>().sqrt()
    }
    /// Convert to a ComplexVector
    pub fn to_vector(&self) -> ComplexVector {
        ComplexVector::new(self.amplitudes.clone())
    }
    /// Create from a ComplexVector
    ///
    /// # Errors
    /// Same validation as [`QuantumState::new`].
    pub fn from_vector(v: ComplexVector) -> Result<Self> {
        Self::new(v.data)
    }
    /// Inner product ⟨self|other⟩ (conjugate-linear in `self`)
    ///
    /// # Errors
    /// Fails when the two states have different Hilbert-space dimensions.
    pub fn inner(&self, other: &QuantumState) -> Result<Complex64> {
        if self.dimension != other.dimension {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.dimension,
                got: other.dimension,
            });
        }
        Ok(self
            .amplitudes
            .iter()
            .zip(other.amplitudes.iter())
            .map(|(a, b)| a.conj() * b)
            .sum())
    }
    /// Fidelity |⟨self|other⟩|² (for pure states)
    pub fn fidelity(&self, other: &QuantumState) -> Result<f64> {
        Ok(self.inner(other)?.norm_sqr())
    }
    /// Tensor product |self⟩ ⊗ |other⟩
    // Kronecker ordering: `self` indexes the high part of the combined
    // basis index (outer loop), `other` the low part.
    pub fn tensor(&self, other: &QuantumState) -> Self {
        let new_dimension = self.dimension * other.dimension;
        let mut new_amplitudes = Vec::with_capacity(new_dimension);
        for a in &self.amplitudes {
            for b in &other.amplitudes {
                new_amplitudes.push(a * b);
            }
        }
        Self {
            amplitudes: new_amplitudes,
            dimension: new_dimension,
        }
    }
    /// Apply a unitary operator to the state
    ///
    /// # Errors
    /// Fails when the operator is not dimension × dimension.
    pub fn apply_operator(&self, operator: &ComplexMatrix) -> Result<Self> {
        if operator.rows != self.dimension || operator.cols != self.dimension {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: self.dimension,
                got: operator.rows,
            });
        }
        let result = operator.matvec(&self.to_vector());
        Ok(Self {
            amplitudes: result.data,
            dimension: self.dimension,
        })
    }
    /// Apply a single-qubit gate to qubit at position `target`
    ///
    /// # Errors
    /// Fails when `target` is out of range for this register.
    // NOTE(review): builds a dense 2^n × 2^n operator, so this is
    // exponential in qubit count. Also confirm that ComplexMatrix::tensor's
    // factor ordering is consistent with the little-endian index convention
    // used by `measure_qubit` ((i >> target) & 1) — TODO verify.
    pub fn apply_single_qubit_gate(&self, gate: &ComplexMatrix, target: usize) -> Result<Self> {
        let num_qubits = self.num_qubits();
        if target >= num_qubits {
            return Err(QuantumTopologyError::InvalidQuantumState(
                format!("Target qubit {} out of range for {}-qubit system", target, num_qubits),
            ));
        }
        // Build the full operator using tensor products
        // (identity on every qubit except `target`)
        let mut full_operator = ComplexMatrix::identity(1);
        for i in 0..num_qubits {
            let op = if i == target {
                gate.clone()
            } else {
                ComplexMatrix::identity(2)
            };
            full_operator = full_operator.tensor(&op);
        }
        self.apply_operator(&full_operator)
    }
    /// Probability of measuring basis state |i⟩ (0.0 for out-of-range indices)
    pub fn probability(&self, index: usize) -> f64 {
        if index >= self.dimension {
            return 0.0;
        }
        self.amplitudes[index].norm_sqr()
    }
    /// Get probability distribution over all basis states
    pub fn probability_distribution(&self) -> Vec<f64> {
        self.amplitudes.iter().map(|c| c.norm_sqr()).collect()
    }
    /// Measure the state in computational basis (collapses state)
    /// Returns the measured index and the collapsed state
    ///
    /// `random_value` is expected to be uniform in [0, 1).
    // NOTE(review): if `random_value` >= the cumulative probability sum
    // (e.g. exactly 1.0, or rounding leaves the sum just short), no branch
    // fires and the result silently falls back to index 0 rather than the
    // last index — confirm callers only pass values in [0, 1).
    pub fn measure(&self, random_value: f64) -> (usize, Self) {
        let probs = self.probability_distribution();
        let mut cumulative = 0.0;
        let mut result = 0;
        // Inverse-CDF sampling over the basis-state distribution.
        for (i, &p) in probs.iter().enumerate() {
            cumulative += p;
            if random_value < cumulative {
                result = i;
                break;
            }
        }
        // Collapse to measured state
        let collapsed = Self::basis_state(self.dimension, result).unwrap();
        (result, collapsed)
    }
    /// Partial measurement of qubit at position `target`
    ///
    /// Returns (measured-one?, post-measurement state). An out-of-range
    /// target returns (false, unchanged state).
    // NOTE(review): if the measured outcome has probability 0 the
    // normalization factor is 0 and the division below produces NaN
    // amplitudes — confirm callers cannot hit this with random_value = 1.0
    // on a definite-outcome qubit.
    pub fn measure_qubit(&self, target: usize, random_value: f64) -> (bool, Self) {
        let num_qubits = self.num_qubits();
        if target >= num_qubits {
            return (false, self.clone());
        }
        // Calculate probability of measuring |0⟩
        // (sum over all basis states whose target bit is 0)
        let mut prob_zero = 0.0;
        for i in 0..self.dimension {
            if (i >> target) & 1 == 0 {
                prob_zero += self.amplitudes[i].norm_sqr();
            }
        }
        let measured_one = random_value >= prob_zero;
        let normalization = if measured_one {
            (1.0 - prob_zero).sqrt()
        } else {
            prob_zero.sqrt()
        };
        // Collapse the state: keep only amplitudes consistent with the
        // outcome and renormalize them.
        let mut new_amplitudes = vec![Complex64::new(0.0, 0.0); self.dimension];
        for i in 0..self.dimension {
            let qubit_val = (i >> target) & 1;
            if (qubit_val == 1) == measured_one {
                new_amplitudes[i] = self.amplitudes[i] / normalization;
            }
        }
        let collapsed = Self {
            amplitudes: new_amplitudes,
            dimension: self.dimension,
        };
        (measured_one, collapsed)
    }
    /// Compute von Neumann entropy (for pure states, this is 0)
    /// For entanglement entropy, use partial trace first
    pub fn von_neumann_entropy(&self) -> f64 {
        // For a pure state, von Neumann entropy is 0
        0.0
    }
    /// Compute the density matrix |ψ⟩⟨ψ|
    pub fn to_density_matrix(&self) -> ComplexMatrix {
        self.to_vector().outer(&self.to_vector())
    }
    /// Compute the reduced density matrix by tracing out specified qubits
    ///
    /// ρ_A(i, j) = Σ_k ⟨i, k|ψ⟩ ⟨ψ|j, k⟩ where k runs over the traced-out
    /// subsystem's basis states.
    pub fn reduced_density_matrix(&self, keep_qubits: &[usize]) -> ComplexMatrix {
        let num_qubits = self.num_qubits();
        let trace_qubits: Vec<usize> = (0..num_qubits)
            .filter(|q| !keep_qubits.contains(q))
            .collect();
        if trace_qubits.is_empty() {
            // Nothing to trace out: the reduced matrix is the full |ψ⟩⟨ψ|.
            return self.to_density_matrix();
        }
        let keep_dim = 1 << keep_qubits.len();
        let trace_dim = 1 << trace_qubits.len();
        let mut reduced = ComplexMatrix::zeros(keep_dim, keep_dim);
        for i in 0..keep_dim {
            for j in 0..keep_dim {
                let mut sum = Complex64::new(0.0, 0.0);
                for k in 0..trace_dim {
                    // Reconstruct full indices
                    let full_i = self.reconstruct_index(i, k, keep_qubits, &trace_qubits);
                    let full_j = self.reconstruct_index(j, k, keep_qubits, &trace_qubits);
                    sum += self.amplitudes[full_i] * self.amplitudes[full_j].conj();
                }
                reduced.set(i, j, sum);
            }
        }
        reduced
    }
    /// Helper to reconstruct full index from partial indices
    ///
    /// Scatters bit `i` of `keep_idx` to position `keep_qubits[i]` and bit
    /// `i` of `trace_idx` to position `trace_qubits[i]`.
    // NOTE(review): the final `.min(dimension - 1)` clamp can only trigger
    // if the qubit lists reference positions outside the register; it would
    // then silently alias a wrong amplitude instead of erroring — TODO
    // confirm callers always pass in-range, disjoint qubit lists.
    fn reconstruct_index(
        &self,
        keep_idx: usize,
        trace_idx: usize,
        keep_qubits: &[usize],
        trace_qubits: &[usize],
    ) -> usize {
        let num_qubits = self.num_qubits();
        let mut full_idx = 0;
        for (i, &q) in keep_qubits.iter().enumerate() {
            if (keep_idx >> i) & 1 == 1 {
                full_idx |= 1 << q;
            }
        }
        for (i, &q) in trace_qubits.iter().enumerate() {
            if (trace_idx >> i) & 1 == 1 {
                full_idx |= 1 << q;
            }
        }
        full_idx.min(self.dimension - 1)
    }
    /// Compute entanglement entropy between subsystems
    /// Splits the system at `split_point` qubits from the left
    ///
    /// Returns 0.0 for trivial splits (empty subsystem on either side).
    pub fn entanglement_entropy(&self, split_point: usize) -> f64 {
        let num_qubits = self.num_qubits();
        if split_point == 0 || split_point >= num_qubits {
            return 0.0;
        }
        let keep_qubits: Vec<usize> = (0..split_point).collect();
        let reduced = self.reduced_density_matrix(&keep_qubits);
        // Compute eigenvalues of reduced density matrix
        // (arguments presumably max iterations and tolerance — TODO confirm
        // against ComplexMatrix::eigenvalues' signature)
        let eigenvalues = reduced.eigenvalues(100, 1e-10);
        // Compute von Neumann entropy: S = -Σ λᵢ log(λᵢ)
        let mut entropy = 0.0;
        for ev in eigenvalues {
            let lambda = ev.re.max(0.0); // Eigenvalues should be non-negative
            if lambda > constants::EPSILON {
                entropy -= lambda * lambda.ln();
            }
        }
        entropy
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Single-qubit basis states: |0⟩ and |1⟩ must have definite outcomes.
    #[test]
    fn test_qubit_basics() {
        let q0 = Qubit::zero();
        assert!((q0.prob_zero() - 1.0).abs() < 1e-10);
        assert!(q0.prob_one() < 1e-10);
        let q1 = Qubit::one();
        assert!(q1.prob_zero() < 1e-10);
        assert!((q1.prob_one() - 1.0).abs() < 1e-10);
    }
    // |+⟩ and |−⟩ are balanced superpositions: 50/50 outcomes.
    #[test]
    fn test_qubit_plus_minus() {
        let plus = Qubit::plus();
        assert!((plus.prob_zero() - 0.5).abs() < 1e-10);
        assert!((plus.prob_one() - 0.5).abs() < 1e-10);
        let minus = Qubit::minus();
        assert!((minus.prob_zero() - 0.5).abs() < 1e-10);
    }
    // H is self-inverse: H|0⟩ = |+⟩ and H²|0⟩ = |0⟩.
    #[test]
    fn test_qubit_hadamard() {
        let q0 = Qubit::zero();
        let h_q0 = q0.hadamard();
        // H|0⟩ = |+⟩
        assert!((h_q0.prob_zero() - 0.5).abs() < 1e-10);
        assert!((h_q0.prob_one() - 0.5).abs() < 1e-10);
        // H²|0⟩ = |0⟩
        let hh_q0 = h_q0.hadamard();
        assert!((hh_q0.prob_zero() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_qubit_fidelity() {
        let q0 = Qubit::zero();
        let q1 = Qubit::one();
        let plus = Qubit::plus();
        // Orthogonal states have zero fidelity
        assert!(q0.fidelity(&q1) < 1e-10);
        // Same state has fidelity 1
        assert!((q0.fidelity(&q0) - 1.0).abs() < 1e-10);
        // |⟨0|+⟩|² = 0.5
        assert!((q0.fidelity(&plus) - 0.5).abs() < 1e-10);
    }
    #[test]
    fn test_quantum_state_ground() {
        let state = QuantumState::ground_state(2);
        assert_eq!(state.dimension, 4);
        assert!((state.probability(0) - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_quantum_state_ghz() {
        let ghz = QuantumState::ghz_state(3);
        assert_eq!(ghz.dimension, 8);
        // GHZ state has 50% probability on |000⟩ and |111⟩
        assert!((ghz.probability(0) - 0.5).abs() < 1e-10);
        assert!((ghz.probability(7) - 0.5).abs() < 1e-10);
    }
    #[test]
    fn test_quantum_state_tensor() {
        let q0 = QuantumState::basis_state(2, 0).unwrap();
        let q1 = QuantumState::basis_state(2, 1).unwrap();
        let product = q0.tensor(&q1);
        assert_eq!(product.dimension, 4);
        // |0⟩ ⊗ |1⟩ = |01⟩ (index 1 in 2-qubit system)
        assert!((product.probability(1) - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_quantum_state_fidelity() {
        let s1 = QuantumState::ground_state(2);
        let s2 = QuantumState::uniform_superposition(2);
        // Ground state vs uniform superposition
        let fid = s1.fidelity(&s2).unwrap();
        assert!((fid - 0.25).abs() < 1e-10); // |⟨00|++++⟩|² = 1/4
        // Self-fidelity is 1
        assert!((s1.fidelity(&s1).unwrap() - 1.0).abs() < 1e-10);
    }
    // Separable states have zero entanglement entropy; the Bell state
    // (|00⟩ + |11⟩)/√2 should approach the maximum ln(2).
    #[test]
    fn test_entanglement_entropy() {
        // Product state has zero entanglement entropy
        let product = QuantumState::ground_state(2);
        let entropy = product.entanglement_entropy(1);
        assert!(entropy < 1e-5);
        // Bell state has maximum entanglement entropy (log 2)
        let mut bell = QuantumState::basis_state(4, 0).unwrap();
        bell.amplitudes[0] = Complex64::new(1.0 / 2.0_f64.sqrt(), 0.0);
        bell.amplitudes[3] = Complex64::new(1.0 / 2.0_f64.sqrt(), 0.0);
        let bell_entropy = bell.entanglement_entropy(1);
        // Should be close to ln(2) ≈ 0.693
        assert!(bell_entropy > 0.5);
    }
}

View File

@@ -0,0 +1,853 @@
//! Simplicial Complex and Algebraic Topology Operations
//!
//! Provides simplicial complexes, boundary maps, and algebraic topology operations
//! for computing homology and cohomology groups.
use std::collections::{HashMap, HashSet, BTreeSet};
use super::{constants, QuantumTopologyError, Result};
use ruvector_solver::types::CsrMatrix;
/// A simplex (k-simplex has k+1 vertices)
///
/// Vertices are stored in a `BTreeSet`, so they are automatically
/// deduplicated and iterated in ascending order; two simplices with the
/// same vertex set compare equal and hash identically.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Simplex {
    /// Sorted vertex indices
    vertices: BTreeSet<usize>,
}
impl Simplex {
/// Create a simplex from vertices
pub fn new(vertices: impl IntoIterator<Item = usize>) -> Self {
Self {
vertices: vertices.into_iter().collect(),
}
}
/// Create a 0-simplex (vertex)
pub fn vertex(v: usize) -> Self {
Self::new([v])
}
/// Create a 1-simplex (edge)
pub fn edge(v0: usize, v1: usize) -> Self {
Self::new([v0, v1])
}
/// Create a 2-simplex (triangle)
pub fn triangle(v0: usize, v1: usize, v2: usize) -> Self {
Self::new([v0, v1, v2])
}
/// Create a 3-simplex (tetrahedron)
pub fn tetrahedron(v0: usize, v1: usize, v2: usize, v3: usize) -> Self {
Self::new([v0, v1, v2, v3])
}
/// Dimension of the simplex (0 = vertex, 1 = edge, ...)
pub fn dim(&self) -> usize {
if self.vertices.is_empty() {
0
} else {
self.vertices.len() - 1
}
}
/// Number of vertices
pub fn num_vertices(&self) -> usize {
self.vertices.len()
}
/// Get vertices as a sorted vector
pub fn vertices(&self) -> Vec<usize> {
self.vertices.iter().copied().collect()
}
/// Check if this is a face of another simplex
pub fn is_face_of(&self, other: &Simplex) -> bool {
self.vertices.is_subset(&other.vertices) && self.vertices != other.vertices
}
/// Get all faces of dimension dim-1 (boundary)
pub fn boundary_faces(&self) -> Vec<(Simplex, i32)> {
if self.vertices.is_empty() {
return vec![];
}
let verts: Vec<usize> = self.vertices();
let mut faces = Vec::with_capacity(verts.len());
for (i, _) in verts.iter().enumerate() {
let face_verts: Vec<usize> = verts
.iter()
.enumerate()
.filter(|(j, _)| *j != i)
.map(|(_, &v)| v)
.collect();
let sign = if i % 2 == 0 { 1 } else { -1 };
faces.push((Simplex::new(face_verts), sign));
}
faces
}
/// Get all faces of the simplex (all dimensions)
pub fn all_faces(&self) -> Vec<Simplex> {
let verts: Vec<usize> = self.vertices();
let n = verts.len();
let mut faces = Vec::new();
// Generate all non-empty subsets
for mask in 1..(1 << n) {
let subset: Vec<usize> = (0..n)
.filter(|i| (mask >> i) & 1 == 1)
.map(|i| verts[i])
.collect();
faces.push(Simplex::new(subset));
}
faces
}
/// Check if two simplices share a common face
pub fn shares_face_with(&self, other: &Simplex) -> bool {
!self.vertices.is_disjoint(&other.vertices)
}
/// Join two simplices (if disjoint)
pub fn join(&self, other: &Simplex) -> Option<Simplex> {
if !self.vertices.is_disjoint(&other.vertices) {
return None;
}
let mut new_vertices = self.vertices.clone();
new_vertices.extend(&other.vertices);
Some(Simplex { vertices: new_vertices })
}
}
impl std::fmt::Display for Simplex {
    /// Render as a bracketed, comma-separated vertex list, e.g. "[0,1,2]".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "[")?;
        for (i, v) in self.vertices.iter().enumerate() {
            if i > 0 {
                write!(f, ",")?;
            }
            write!(f, "{}", v)?;
        }
        write!(f, "]")
    }
}
/// Sparse matrix for boundary computations (using HashMap for O(1) access)
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    /// (row, col) -> value entries (O(1) access); zeros are never stored
    entries: HashMap<(usize, usize), i32>,
    /// Number of rows
    pub rows: usize,
    /// Number of columns
    pub cols: usize,
}
impl SparseMatrix {
    /// Create an empty sparse matrix
    pub fn new(rows: usize, cols: usize) -> Self {
        Self {
            entries: HashMap::new(),
            rows,
            cols,
        }
    }
    /// Create from a dense matrix; zero entries are not stored
    pub fn from_dense(dense: &[Vec<i32>]) -> Self {
        let rows = dense.len();
        let cols = dense.first().map(|r| r.len()).unwrap_or(0);
        let mut entries = HashMap::new();
        for (i, row) in dense.iter().enumerate() {
            for (j, &val) in row.iter().enumerate() {
                if val != 0 {
                    entries.insert((i, j), val);
                }
            }
        }
        Self { entries, rows, cols }
    }
    /// Set a value (O(1) via HashMap); writing 0 removes the entry
    pub fn set(&mut self, row: usize, col: usize, value: i32) {
        if value == 0 {
            self.entries.remove(&(row, col));
        } else {
            self.entries.insert((row, col), value);
        }
    }
    /// Get a value (O(1) via HashMap); absent entries read as 0
    pub fn get(&self, row: usize, col: usize) -> i32 {
        self.entries.get(&(row, col)).copied().unwrap_or(0)
    }
    /// Transpose the matrix
    pub fn transpose(&self) -> Self {
        Self {
            entries: self.entries.iter().map(|(&(r, c), &v)| ((c, r), v)).collect(),
            rows: self.cols,
            cols: self.rows,
        }
    }
    /// Number of non-zero entries
    pub fn nnz(&self) -> usize {
        self.entries.len()
    }
    /// Convert to a dense matrix; out-of-range stored entries are dropped
    pub fn to_dense(&self) -> Vec<Vec<i32>> {
        let mut dense = vec![vec![0; self.cols]; self.rows];
        for (&(r, c), &v) in &self.entries {
            if r < self.rows && c < self.cols {
                dense[r][c] = v;
            }
        }
        dense
    }
    /// Matrix-vector multiplication (over integers mod 2)
    pub fn matvec_mod2(&self, v: &[u8]) -> Vec<u8> {
        let mut result = vec![0u8; self.rows];
        for (&(r, c), &val) in &self.entries {
            if c < v.len() && r < result.len() {
                let product = ((val.abs() as u8) * v[c]) % 2;
                result[r] = (result[r] + product) % 2;
            }
        }
        result
    }
    /// Convert to CsrMatrix<f64> for efficient SpMV operations.
    /// Uses ruvector-solver's CSR format with O(nnz) cache-friendly SpMV.
    pub fn to_csr_f64(&self) -> CsrMatrix<f64> {
        CsrMatrix::<f64>::from_coo(
            self.rows,
            self.cols,
            self.entries.iter().map(|(&(r, c), &v)| (r, c, v as f64)),
        )
    }
    /// Sparse matrix-vector multiply using solver's optimized CSR SpMV.
    /// Converts to CsrMatrix internally for cache-efficient computation.
    pub fn spmv_f64(&self, x: &[f64]) -> Vec<f64> {
        let csr = self.to_csr_f64();
        let mut y = vec![0.0f64; self.rows];
        csr.spmv(x, &mut y);
        y
    }
    /// Get entries as a Vec of (row, col, value) triples (for iteration).
    pub fn triplets(&self) -> Vec<(usize, usize, i32)> {
        self.entries.iter().map(|(&(r, c), &v)| (r, c, v)).collect()
    }
    /// Dense GF(2) copy of the matrix (entries reduced mod 2).
    fn to_dense_mod2(&self) -> Vec<Vec<u8>> {
        self.to_dense()
            .into_iter()
            .map(|row| row.into_iter().map(|v| (v.abs() % 2) as u8).collect())
            .collect()
    }
    /// Gauss-Jordan elimination over GF(2), in place.
    ///
    /// Returns the pivot positions as (row, column) pairs. The pivot row
    /// only advances when a pivot is actually found in the current column.
    /// (The previous implementation advanced the row unconditionally per
    /// column scan, which skipped candidate rows and could under-report
    /// the rank — e.g. [[0,1,0],[0,0,1]] was reported as rank 0.)
    fn eliminate_mod2(dense: &mut [Vec<u8>]) -> Vec<(usize, usize)> {
        let rows = dense.len();
        let cols = if rows > 0 { dense[0].len() } else { 0 };
        let mut pivots = Vec::new();
        let mut row = 0;
        for col in 0..cols {
            if row >= rows {
                break;
            }
            // Find a pivot at or below the current row in this column.
            if let Some(pr) = (row..rows).find(|&r| dense[r][col] != 0) {
                dense.swap(row, pr);
                // Clear this column in every other row (addition = XOR mod 2).
                for r in 0..rows {
                    if r != row && dense[r][col] != 0 {
                        for c in 0..cols {
                            dense[r][c] = (dense[r][c] + dense[row][c]) % 2;
                        }
                    }
                }
                pivots.push((row, col));
                row += 1;
            }
        }
        pivots
    }
    /// Compute rank over Z/2Z using Gaussian elimination
    pub fn rank_mod2(&self) -> usize {
        let mut dense = self.to_dense_mod2();
        Self::eliminate_mod2(&mut dense).len()
    }
    /// Compute kernel (null space) over Z/2Z
    ///
    /// Returns one basis vector (length `self.cols`) per free column of
    /// the reduced matrix; an empty Vec means the kernel is trivial. For a
    /// 0-row matrix there are no pivots, so every column is free and the
    /// standard basis of the whole domain is returned. (The old identity
    /// augmentation was dead work — its columns were never read — and has
    /// been removed.)
    pub fn kernel_mod2(&self) -> Vec<Vec<u8>> {
        let mut dense = self.to_dense_mod2();
        let pivots = Self::eliminate_mod2(&mut dense);
        let pivot_cols: HashSet<usize> = pivots.iter().map(|&(_, c)| c).collect();
        let mut kernel = Vec::new();
        for c in 0..self.cols {
            if !pivot_cols.contains(&c) {
                // Set the free variable to 1 and back-substitute: each pivot
                // variable pc equals the entry in column c of its pivot row.
                let mut basis = vec![0u8; self.cols];
                basis[c] = 1;
                for &(r, pc) in &pivots {
                    if dense[r][c] != 0 {
                        basis[pc] = 1;
                    }
                }
                kernel.push(basis);
            }
        }
        kernel
    }
}
/// Boundary matrix for a simplicial complex
///
/// Columns are indexed by `domain` (the k-simplices) and rows by
/// `codomain` (the (k-1)-simplices); entries are orientation signs ±1.
#[derive(Debug, Clone)]
pub struct BoundaryMatrix {
    /// Sparse boundary matrix ∂_k: C_k → C_{k-1}
    pub matrix: SparseMatrix,
    /// Dimension k (reported as 0 when the domain is empty)
    pub dimension: usize,
    /// Domain simplices (k-simplices)
    pub domain: Vec<Simplex>,
    /// Codomain simplices ((k-1)-simplices)
    pub codomain: Vec<Simplex>,
}
impl BoundaryMatrix {
/// Create a boundary matrix for dimension k
pub fn new(k_simplices: &[Simplex], k_minus_1_simplices: &[Simplex]) -> Self {
let rows = k_minus_1_simplices.len();
let cols = k_simplices.len();
let mut matrix = SparseMatrix::new(rows, cols);
// Build simplex to index mapping for codomain
let codomain_indices: HashMap<&Simplex, usize> = k_minus_1_simplices
.iter()
.enumerate()
.map(|(i, s)| (s, i))
.collect();
// Build boundary matrix
for (col, sigma) in k_simplices.iter().enumerate() {
for (face, sign) in sigma.boundary_faces() {
if let Some(&row) = codomain_indices.get(&face) {
matrix.set(row, col, sign);
}
}
}
Self {
matrix,
dimension: if k_simplices.is_empty() { 0 } else { k_simplices[0].dim() },
domain: k_simplices.to_vec(),
codomain: k_minus_1_simplices.to_vec(),
}
}
/// Compute the image (over Z/2Z)
pub fn image_rank(&self) -> usize {
self.matrix.rank_mod2()
}
/// Compute the kernel (over Z/2Z)
pub fn kernel(&self) -> Vec<Vec<u8>> {
self.matrix.kernel_mod2()
}
}
/// Simplicial complex with boundary chain structure
///
/// Invariant: closed under taking faces — `add_simplex` inserts every face
/// of a new simplex, so `simplices[k]` always contains all k-dimensional
/// faces of every member.
#[derive(Debug, Clone)]
pub struct SimplicialComplex {
    /// Simplices organized by dimension (index = dimension)
    simplices: Vec<HashSet<Simplex>>,
    /// Maximum dimension
    max_dim: usize,
}
/// Construction, queries, and algebraic-topology computations (Euler
/// characteristic, boundary maps, Betti numbers, cup products, Laplacian).
impl SimplicialComplex {
    /// Create an empty simplicial complex
    pub fn new() -> Self {
        Self {
            simplices: vec![HashSet::new()],
            max_dim: 0,
        }
    }
    /// Create from a list of simplices (automatically adds faces)
    pub fn from_simplices(simplices: impl IntoIterator<Item = Simplex>) -> Self {
        let mut complex = Self::new();
        for s in simplices {
            complex.add_simplex(s);
        }
        complex
    }
    /// Add a simplex and all its faces
    // `all_faces` includes the simplex itself, so this single loop keeps
    // the complex closed under taking faces.
    pub fn add_simplex(&mut self, simplex: Simplex) {
        // Ensure we have enough dimensions
        let dim = simplex.dim();
        while self.simplices.len() <= dim {
            self.simplices.push(HashSet::new());
        }
        self.max_dim = self.max_dim.max(dim);
        // Add simplex and all faces
        for face in simplex.all_faces() {
            let face_dim = face.dim();
            if face_dim < self.simplices.len() {
                self.simplices[face_dim].insert(face);
            }
        }
    }
    /// Check if a simplex is in the complex
    pub fn contains(&self, simplex: &Simplex) -> bool {
        let dim = simplex.dim();
        if dim >= self.simplices.len() {
            return false;
        }
        self.simplices[dim].contains(simplex)
    }
    /// Get all simplices of dimension k
    // HashSet iteration order is arbitrary but stable for a given set
    // instance, so repeated calls on an unmodified complex agree — the
    // boundary-matrix indexing below relies on this.
    pub fn simplices_of_dim(&self, k: usize) -> Vec<Simplex> {
        if k >= self.simplices.len() {
            return vec![];
        }
        self.simplices[k].iter().cloned().collect()
    }
    /// Get all simplices
    pub fn all_simplices(&self) -> Vec<Simplex> {
        self.simplices.iter().flat_map(|s| s.iter().cloned()).collect()
    }
    /// Number of simplices of dimension k
    pub fn count(&self, k: usize) -> usize {
        self.simplices.get(k).map(|s| s.len()).unwrap_or(0)
    }
    /// Total number of simplices
    pub fn size(&self) -> usize {
        self.simplices.iter().map(|s| s.len()).sum()
    }
    /// Maximum dimension
    pub fn dimension(&self) -> usize {
        self.max_dim
    }
    /// f-vector: (f_0, f_1, f_2, ...) = counts at each dimension
    pub fn f_vector(&self) -> Vec<usize> {
        self.simplices.iter().map(|s| s.len()).collect()
    }
    /// Euler characteristic: χ = Σ (-1)^k f_k
    pub fn euler_characteristic(&self) -> i64 {
        self.simplices
            .iter()
            .enumerate()
            .map(|(k, s)| {
                let sign = if k % 2 == 0 { 1 } else { -1 };
                sign * s.len() as i64
            })
            .sum()
    }
    /// Get the boundary matrix ∂_k: C_k → C_{k-1}
    /// (∂_0 maps into the empty codomain, i.e. a 0-row matrix)
    pub fn boundary_matrix(&self, k: usize) -> BoundaryMatrix {
        let k_simplices = self.simplices_of_dim(k);
        let k_minus_1_simplices = if k > 0 {
            self.simplices_of_dim(k - 1)
        } else {
            vec![]
        };
        BoundaryMatrix::new(&k_simplices, &k_minus_1_simplices)
    }
    /// Compute Betti number β_k (over Z/2Z)
    /// β_k = dim(ker(∂_k)) - dim(im(∂_{k+1}))
    pub fn betti_number(&self, k: usize) -> usize {
        let boundary_k = self.boundary_matrix(k);
        let boundary_k_plus_1 = self.boundary_matrix(k + 1);
        let kernel_dim = boundary_k.kernel().len();
        let image_dim = boundary_k_plus_1.image_rank();
        // saturating_sub guards against numerical/degenerate cases where
        // the image rank would exceed the cycle count.
        kernel_dim.saturating_sub(image_dim)
    }
    /// Compute all Betti numbers up to max dimension
    pub fn betti_numbers(&self) -> Vec<usize> {
        (0..=self.max_dim).map(|k| self.betti_number(k)).collect()
    }
    /// Compute homology generators (as chains)
    ///
    /// Returns one chain (list of k-simplices) per homology class.
    // NOTE: these are cycle representatives only — boundaries are not
    // quotiented out, so the returned chains may include cycles that are
    // homologous to each other or to zero.
    pub fn homology_generators(&self, k: usize) -> Vec<Vec<Simplex>> {
        let boundary_k = self.boundary_matrix(k);
        let boundary_k_plus_1 = self.boundary_matrix(k + 1);
        let cycles = boundary_k.kernel();
        let boundaries = boundary_k_plus_1.image_rank();
        // Return cycle representatives (simplified - doesn't mod out boundaries)
        let k_simplices = self.simplices_of_dim(k);
        let num_generators = cycles.len().saturating_sub(boundaries);
        cycles
            .into_iter()
            .take(num_generators)
            .map(|cycle| {
                cycle
                    .into_iter()
                    .enumerate()
                    .filter(|(_, v)| *v != 0)
                    .map(|(i, _)| k_simplices[i].clone())
                    .collect()
            })
            .collect()
    }
    /// Cup product at the chain level (simplified)
    /// For cochains α ∈ C^p and β ∈ C^q, compute α β ∈ C^{p+q}
    ///
    /// Uses the front-face/back-face (Alexander-Whitney style) formula on
    /// the sorted vertex lists. Cochain values are indexed by the order of
    /// `simplices_of_dim(p)` / `simplices_of_dim(q)`.
    // NOTE(review): `p_simplices`/`q_simplices` are recomputed inside the
    // loop over (p+q)-simplices, plus a linear `position` scan per lookup —
    // hoisting the lists and using an index map would avoid O(n²) work.
    pub fn cup_product(
        &self,
        alpha: &[f64], // p-cochain values on p-simplices
        beta: &[f64], // q-cochain values on q-simplices
        p: usize,
        q: usize,
    ) -> Vec<f64> {
        let p_plus_q_simplices = self.simplices_of_dim(p + q);
        let mut result = vec![0.0; p_plus_q_simplices.len()];
        for (i, sigma) in p_plus_q_simplices.iter().enumerate() {
            let vertices = sigma.vertices();
            if vertices.len() >= p + q + 1 {
                // Front p-face: [v_0, ..., v_p]
                let front: Vec<usize> = vertices[..=p].to_vec();
                // Back q-face: [v_p, ..., v_{p+q}]
                let back: Vec<usize> = vertices[p..].to_vec();
                let front_simplex = Simplex::new(front);
                let back_simplex = Simplex::new(back);
                // Find indices in respective dimensions
                let p_simplices = self.simplices_of_dim(p);
                let q_simplices = self.simplices_of_dim(q);
                let front_idx = p_simplices.iter().position(|s| s == &front_simplex);
                let back_idx = q_simplices.iter().position(|s| s == &back_simplex);
                if let (Some(fi), Some(bi)) = (front_idx, back_idx) {
                    if fi < alpha.len() && bi < beta.len() {
                        result[i] = alpha[fi] * beta[bi];
                    }
                }
            }
        }
        result
    }
    /// Compute the graph Laplacian L = D - A as a CsrMatrix<f64>.
    /// Uses the 1-skeleton (edges) to build the adjacency matrix, then
    /// L_ii = degree(i), L_ij = -1 if edge (i,j) exists.
    pub fn graph_laplacian_csr(&self) -> CsrMatrix<f64> {
        let num_vertices = self.count(0);
        let edges = self.simplices_of_dim(1);
        let mut entries: Vec<(usize, usize, f64)> = Vec::new();
        let mut degree = vec![0usize; num_vertices];
        // Get vertex mapping (since vertices may not be 0..n)
        // Keys are the singleton vertex lists of the 0-simplices.
        let vertices = self.simplices_of_dim(0);
        let vertex_map: HashMap<Vec<usize>, usize> = vertices.iter()
            .enumerate()
            .map(|(idx, s)| (s.vertices(), idx))
            .collect();
        for edge in &edges {
            let verts = edge.vertices();
            if verts.len() == 2 {
                if let (Some(&i), Some(&j)) = (vertex_map.get(&vec![verts[0]]), vertex_map.get(&vec![verts[1]])) {
                    // Symmetric off-diagonal adjacency entries.
                    entries.push((i, j, -1.0));
                    entries.push((j, i, -1.0));
                    degree[i] += 1;
                    degree[j] += 1;
                }
            }
        }
        // Add diagonal (degree)
        for (i, &d) in degree.iter().enumerate() {
            entries.push((i, i, d as f64));
        }
        CsrMatrix::<f64>::from_coo(num_vertices, num_vertices, entries)
    }
}
impl Default for SimplicialComplex {
    /// The default complex is empty (same as [`SimplicialComplex::new`]).
    fn default() -> Self {
        Self::new()
    }
}
/// Create standard simplicial complexes
pub mod standard_complexes {
    use super::*;
    /// Create k-simplex (filled)
    pub fn simplex(k: usize) -> SimplicialComplex {
        let vertices: Vec<usize> = (0..=k).collect();
        let simplex = Simplex::new(vertices);
        SimplicialComplex::from_simplices([simplex])
    }
    /// Create k-sphere (boundary of (k+1)-simplex)
    pub fn sphere(k: usize) -> SimplicialComplex {
        let simplex_vertices: Vec<usize> = (0..=k + 1).collect();
        let big_simplex = Simplex::new(simplex_vertices);
        // Get all k-faces
        let mut complex = SimplicialComplex::new();
        for (face, _) in big_simplex.boundary_faces() {
            complex.add_simplex(face);
        }
        complex
    }
    /// Create torus (triangulated)
    // NOTE(review): a 7-vertex triangulation of the torus (χ = 0) has 21
    // edges and 14 triangles, but this list contains 15 — verify that the
    // face list really triangulates a torus.
    pub fn torus() -> SimplicialComplex {
        // Minimal triangulation of torus with 7 vertices
        let triangles = [
            [0, 1, 2], [0, 2, 3], [0, 3, 5], [0, 5, 6], [0, 4, 6], [0, 1, 4],
            [1, 2, 4], [2, 4, 5], [2, 3, 5], [3, 4, 6], [3, 5, 6], [1, 3, 4],
            [1, 3, 6], [1, 2, 6], [2, 5, 6],
        ];
        SimplicialComplex::from_simplices(
            triangles.iter().map(|&[a, b, c]| Simplex::triangle(a, b, c))
        )
    }
    /// Create Klein bottle (triangulated)
    // NOTE(review): unordered vertex-set simplices cannot express the usual
    // square-with-identifications construction directly — confirm this face
    // list actually yields Klein-bottle topology.
    pub fn klein_bottle() -> SimplicialComplex {
        // Minimal triangulation with identification
        // Similar structure to torus but with different identifications
        let triangles = [
            [0, 1, 4], [0, 4, 3], [0, 3, 2], [0, 2, 5], [0, 5, 1],
            [1, 4, 5], [2, 3, 6], [3, 4, 6], [4, 5, 6], [1, 2, 5],
            [1, 2, 6], [2, 5, 6], [3, 4, 7], [4, 6, 7], [3, 6, 7],
        ];
        SimplicialComplex::from_simplices(
            triangles.iter().map(|&[a, b, c]| Simplex::triangle(a, b, c))
        )
    }
    /// Create projective plane RP² (triangulated)
    // NOTE(review): the standard minimal 6-vertex triangulation of RP²
    // (RP²₆) has 15 edges and 10 triangles; this list has only 8 — verify.
    pub fn projective_plane() -> SimplicialComplex {
        // 6-vertex triangulation
        let triangles = [
            [0, 1, 2], [0, 2, 4], [0, 1, 5], [0, 4, 5], [1, 2, 3],
            [1, 3, 5], [2, 3, 4], [3, 4, 5],
        ];
        SimplicialComplex::from_simplices(
            triangles.iter().map(|&[a, b, c]| Simplex::triangle(a, b, c))
        )
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_simplex_basics() {
        let vertex = Simplex::vertex(0);
        assert_eq!(vertex.dim(), 0);
        let edge = Simplex::edge(0, 1);
        assert_eq!(edge.dim(), 1);
        let triangle = Simplex::triangle(0, 1, 2);
        assert_eq!(triangle.dim(), 2);
    }
    // The boundary of a triangle is its three edges.
    #[test]
    fn test_simplex_boundary() {
        let triangle = Simplex::triangle(0, 1, 2);
        let boundary = triangle.boundary_faces();
        assert_eq!(boundary.len(), 3);
        // Check edges are correct
        let edges: Vec<Simplex> = boundary.iter().map(|(s, _)| s.clone()).collect();
        assert!(edges.contains(&Simplex::edge(1, 2)));
        assert!(edges.contains(&Simplex::edge(0, 2)));
        assert!(edges.contains(&Simplex::edge(0, 1)));
    }
    #[test]
    fn test_simplicial_complex_triangle() {
        let complex = SimplicialComplex::from_simplices([Simplex::triangle(0, 1, 2)]);
        assert_eq!(complex.count(0), 3); // 3 vertices
        assert_eq!(complex.count(1), 3); // 3 edges
        assert_eq!(complex.count(2), 1); // 1 triangle
        // Euler characteristic: χ = 3 - 3 + 1 = 1
        assert_eq!(complex.euler_characteristic(), 1);
    }
    #[test]
    fn test_betti_numbers_triangle() {
        let complex = SimplicialComplex::from_simplices([Simplex::triangle(0, 1, 2)]);
        let betti = complex.betti_numbers();
        // Filled triangle is contractible: β_0 = 1, β_1 = 0, β_2 = 0
        assert_eq!(betti[0], 1);
        assert_eq!(betti[1], 0);
    }
    #[test]
    fn test_betti_numbers_circle() {
        // Circle = triangle boundary (no filling)
        let mut complex = SimplicialComplex::new();
        complex.add_simplex(Simplex::edge(0, 1));
        complex.add_simplex(Simplex::edge(1, 2));
        complex.add_simplex(Simplex::edge(0, 2));
        let betti = complex.betti_numbers();
        // Circle: β_0 = 1 (connected), β_1 = 1 (one loop)
        assert_eq!(betti[0], 1);
        assert_eq!(betti[1], 1);
    }
    #[test]
    fn test_sparse_matrix_rank() {
        // Simple 2x3 matrix with rank 2
        let matrix = SparseMatrix::from_dense(&[
            vec![1, 0, 1],
            vec![0, 1, 1],
        ]);
        assert_eq!(matrix.rank_mod2(), 2);
    }
    #[test]
    fn test_sparse_matrix_kernel() {
        // Matrix with non-trivial kernel
        // (a zero row leaves at least one free column)
        let matrix = SparseMatrix::from_dense(&[
            vec![1, 1, 0],
            vec![0, 0, 0],
        ]);
        let kernel = matrix.kernel_mod2();
        assert!(!kernel.is_empty());
    }
    #[test]
    fn test_standard_simplex() {
        let simplex_2 = standard_complexes::simplex(2);
        assert_eq!(simplex_2.euler_characteristic(), 1);
    }
    #[test]
    fn test_standard_sphere() {
        // 1-sphere should have χ = 0 (V - E = n - n = 0 for a cycle)
        let sphere_1 = standard_complexes::sphere(1);
        // Actually S^1 has χ = 0
        let chi = sphere_1.euler_characteristic();
        assert!(chi == 0 || chi == 2); // Depending on triangulation
    }
}

View File

@@ -0,0 +1,941 @@
//! Topological Quantum Codes
//!
//! Implements topological quantum error correcting codes, graph states,
//! and structure-preserving quantum encodings.
use super::complex_matrix::{gates, Complex64, ComplexMatrix, ComplexVector};
use super::quantum_channel::{PauliOperator, PauliType};
use super::quantum_state::QuantumState;
use super::topological_invariant::TopologicalInvariant;
use super::{constants, QuantumTopologyError, Result};
use ruvector_solver::types::CsrMatrix;
use std::collections::{HashMap, HashSet};
/// Stabilizer code representation
///
/// An [[n, k]] code described by a set of mutually commuting Pauli
/// stabilizer generators together with representative logical X/Z
/// operators (one pair per encoded qubit).
#[derive(Debug, Clone)]
pub struct StabilizerCode {
    /// Stabilizer generators
    pub stabilizers: Vec<PauliOperator>,
    /// Logical X operators
    pub logical_x: Vec<PauliOperator>,
    /// Logical Z operators
    pub logical_z: Vec<PauliOperator>,
    /// Number of physical qubits
    pub num_physical: usize,
    /// Number of logical qubits
    pub num_logical: usize,
}
impl StabilizerCode {
/// Create a new stabilizer code
pub fn new(
stabilizers: Vec<PauliOperator>,
logical_x: Vec<PauliOperator>,
logical_z: Vec<PauliOperator>,
num_physical: usize,
) -> Result<Self> {
// Verify stabilizers commute
for (i, s1) in stabilizers.iter().enumerate() {
for s2 in stabilizers.iter().skip(i + 1) {
if !s1.commutes_with(s2) {
return Err(QuantumTopologyError::InvalidTopologicalCode(
"Stabilizers must commute".to_string(),
));
}
}
}
// Number of logical qubits
let num_logical = logical_x.len();
if num_logical != logical_z.len() {
return Err(QuantumTopologyError::InvalidTopologicalCode(
"Number of logical X and Z operators must match".to_string(),
));
}
Ok(Self {
stabilizers,
logical_x,
logical_z,
num_physical,
num_logical,
})
}
/// Create the 3-qubit bit-flip code
pub fn bit_flip_code() -> Self {
// [[3,1,1]] code - protects against bit-flip errors
// Stabilizers: Z₁Z₂, Z₂Z₃
// Logical X: X₁X₂X₃
// Logical Z: Z₁
let s1 = PauliOperator::new(vec![PauliType::Z, PauliType::Z, PauliType::I]);
let s2 = PauliOperator::new(vec![PauliType::I, PauliType::Z, PauliType::Z]);
let lx = PauliOperator::new(vec![PauliType::X, PauliType::X, PauliType::X]);
let lz = PauliOperator::new(vec![PauliType::Z, PauliType::I, PauliType::I]);
Self {
stabilizers: vec![s1, s2],
logical_x: vec![lx],
logical_z: vec![lz],
num_physical: 3,
num_logical: 1,
}
}
/// Create the 3-qubit phase-flip code
pub fn phase_flip_code() -> Self {
// Stabilizers: X₁X₂, X₂X₃
// Logical X: X₁
// Logical Z: Z₁Z₂Z₃
let s1 = PauliOperator::new(vec![PauliType::X, PauliType::X, PauliType::I]);
let s2 = PauliOperator::new(vec![PauliType::I, PauliType::X, PauliType::X]);
let lx = PauliOperator::new(vec![PauliType::X, PauliType::I, PauliType::I]);
let lz = PauliOperator::new(vec![PauliType::Z, PauliType::Z, PauliType::Z]);
Self {
stabilizers: vec![s1, s2],
logical_x: vec![lx],
logical_z: vec![lz],
num_physical: 3,
num_logical: 1,
}
}
/// Create the 5-qubit perfect code [[5,1,3]]
pub fn five_qubit_code() -> Self {
// Stabilizers (cyclic permutations of XZZXI)
let s1 = PauliOperator::new(vec![
PauliType::X, PauliType::Z, PauliType::Z, PauliType::X, PauliType::I,
]);
let s2 = PauliOperator::new(vec![
PauliType::I, PauliType::X, PauliType::Z, PauliType::Z, PauliType::X,
]);
let s3 = PauliOperator::new(vec![
PauliType::X, PauliType::I, PauliType::X, PauliType::Z, PauliType::Z,
]);
let s4 = PauliOperator::new(vec![
PauliType::Z, PauliType::X, PauliType::I, PauliType::X, PauliType::Z,
]);
let lx = PauliOperator::new(vec![
PauliType::X, PauliType::X, PauliType::X, PauliType::X, PauliType::X,
]);
let lz = PauliOperator::new(vec![
PauliType::Z, PauliType::Z, PauliType::Z, PauliType::Z, PauliType::Z,
]);
Self {
stabilizers: vec![s1, s2, s3, s4],
logical_x: vec![lx],
logical_z: vec![lz],
num_physical: 5,
num_logical: 1,
}
}
/// Create the Steane [[7,1,3]] code
pub fn steane_code() -> Self {
// CSS code based on Hamming [7,4,3] code
// This is a simplified version - full implementation would use parity check matrices
let mut stabilizers = Vec::new();
// X-type stabilizers (from H matrix)
stabilizers.push(PauliOperator::new(vec![
PauliType::X, PauliType::X, PauliType::X, PauliType::X,
PauliType::I, PauliType::I, PauliType::I,
]));
stabilizers.push(PauliOperator::new(vec![
PauliType::X, PauliType::X, PauliType::I, PauliType::I,
PauliType::X, PauliType::X, PauliType::I,
]));
stabilizers.push(PauliOperator::new(vec![
PauliType::X, PauliType::I, PauliType::X, PauliType::I,
PauliType::X, PauliType::I, PauliType::X,
]));
// Z-type stabilizers
stabilizers.push(PauliOperator::new(vec![
PauliType::Z, PauliType::Z, PauliType::Z, PauliType::Z,
PauliType::I, PauliType::I, PauliType::I,
]));
stabilizers.push(PauliOperator::new(vec![
PauliType::Z, PauliType::Z, PauliType::I, PauliType::I,
PauliType::Z, PauliType::Z, PauliType::I,
]));
stabilizers.push(PauliOperator::new(vec![
PauliType::Z, PauliType::I, PauliType::Z, PauliType::I,
PauliType::Z, PauliType::I, PauliType::Z,
]));
let lx = PauliOperator::new(vec![
PauliType::X, PauliType::X, PauliType::X, PauliType::X,
PauliType::X, PauliType::X, PauliType::X,
]);
let lz = PauliOperator::new(vec![
PauliType::Z, PauliType::Z, PauliType::Z, PauliType::Z,
PauliType::Z, PauliType::Z, PauliType::Z,
]);
Self {
stabilizers,
logical_x: vec![lx],
logical_z: vec![lz],
num_physical: 7,
num_logical: 1,
}
}
/// Code distance (minimum weight of logical operator)
pub fn distance(&self) -> usize {
let mut min_weight = usize::MAX;
for op in &self.logical_x {
min_weight = min_weight.min(op.weight());
}
for op in &self.logical_z {
min_weight = min_weight.min(op.weight());
}
min_weight
}
/// Code parameters [[n, k, d]]
pub fn parameters(&self) -> (usize, usize, usize) {
(self.num_physical, self.num_logical, self.distance())
}
/// Compute syndrome for an error
pub fn syndrome(&self, error: &PauliOperator) -> Vec<bool> {
self.stabilizers
.iter()
.map(|s| !s.commutes_with(error))
.collect()
}
/// Check if an error is correctable (has non-trivial syndrome or is in stabilizer group)
pub fn is_correctable(&self, error: &PauliOperator) -> bool {
let syn = self.syndrome(error);
// Non-trivial syndrome means detectable
syn.iter().any(|&b| b)
}
}
/// Topological code (surface code, color code, etc.)
///
/// Wraps a `StabilizerCode` with lattice metadata; `code_distance` is
/// stored separately from the stabilizer code's representative-based
/// distance estimate.
#[derive(Debug, Clone)]
pub struct TopologicalCode {
    /// Underlying stabilizer code
    pub stabilizer_code: StabilizerCode,
    /// Code distance
    pub code_distance: usize,
    /// Lattice dimensions (for surface codes)
    pub lattice_size: Option<(usize, usize)>,
    /// Code type
    pub code_type: TopologicalCodeType,
}
/// Type of topological code
///
/// Used by `TopologicalCode::threshold_estimate` to select a rough
/// error-correction threshold.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TopologicalCodeType {
    /// Kitaev's toric code
    ToricCode,
    /// Planar surface code
    SurfaceCode,
    /// Color code
    ColorCode,
    /// Generic CSS code
    CSSCode,
}
impl TopologicalCode {
    /// Create a surface code of given size
    ///
    /// Builds a simplified `size x size` data-qubit lattice with X-type and
    /// Z-type 4-qubit plaquette stabilizers and weight-`size` logical
    /// operators; `code_distance` is set to `size`.
    ///
    /// NOTE(review): X- and Z-type stabilizers are placed on the *same*
    /// plaquettes here, so diagonally adjacent X/Z plaquettes overlap on a
    /// single qubit and anticommute; a faithful surface code interleaves
    /// them on a checkerboard. This is not caught at construction time
    /// because the struct is built directly rather than via
    /// `StabilizerCode::new` — confirm whether this simplification is
    /// intended.
    pub fn surface_code(size: usize) -> Self {
        // Simplified surface code construction.
        // Full implementation would build from lattice geometry.
        // Data qubits sit on a size x size grid; measurement ancillas are
        // omitted in this simplified model.
        let num_physical = size * size;
        // Generate stabilizers from lattice
        let mut stabilizers = Vec::new();
        // X-type (plaquette) stabilizers. saturating_sub keeps size == 0
        // from underflowing the range bound.
        for i in 0..size.saturating_sub(1) {
            for j in 0..size.saturating_sub(1) {
                let mut paulis = vec![PauliType::I; num_physical];
                // Four qubits around plaquette
                let indices = [
                    i * size + j,
                    i * size + j + 1,
                    (i + 1) * size + j,
                    (i + 1) * size + j + 1,
                ];
                for &idx in &indices {
                    if idx < num_physical {
                        paulis[idx] = PauliType::X;
                    }
                }
                stabilizers.push(PauliOperator::new(paulis));
            }
        }
        // Z-type (vertex) stabilizers
        for i in 0..size.saturating_sub(1) {
            for j in 0..size.saturating_sub(1) {
                let mut paulis = vec![PauliType::I; num_physical];
                let indices = [
                    i * size + j,
                    i * size + j + 1,
                    (i + 1) * size + j,
                    (i + 1) * size + j + 1,
                ];
                for &idx in &indices {
                    if idx < num_physical {
                        paulis[idx] = PauliType::Z;
                    }
                }
                stabilizers.push(PauliOperator::new(paulis));
            }
        }
        // Logical operators (simplified): logical X along the top row,
        // logical Z down the first column.
        let mut lx_paulis = vec![PauliType::I; num_physical];
        let mut lz_paulis = vec![PauliType::I; num_physical];
        for i in 0..size {
            if i < num_physical {
                lx_paulis[i] = PauliType::X;
                lz_paulis[i * size] = PauliType::Z;
            }
        }
        let logical_x = vec![PauliOperator::new(lx_paulis)];
        let logical_z = vec![PauliOperator::new(lz_paulis)];
        let stabilizer_code = StabilizerCode {
            stabilizers,
            logical_x,
            logical_z,
            num_physical,
            num_logical: 1,
        };
        Self {
            stabilizer_code,
            code_distance: size,
            lattice_size: Some((size, size)),
            code_type: TopologicalCodeType::SurfaceCode,
        }
    }
    /// Create toric code of given size
    ///
    /// NOTE(review): reuses the surface-code lattice and only relabels the
    /// code type; true periodic boundary conditions are not implemented.
    pub fn toric_code(size: usize) -> Self {
        // Similar to surface code but with periodic boundary conditions
        let mut code = Self::surface_code(size);
        code.code_type = TopologicalCodeType::ToricCode;
        code
    }
    /// Get code parameters [[n, k, d]]
    ///
    /// Uses the stored `code_distance` rather than the stabilizer code's
    /// representative-based estimate.
    pub fn parameters(&self) -> (usize, usize, usize) {
        (
            self.stabilizer_code.num_physical,
            self.stabilizer_code.num_logical,
            self.code_distance,
        )
    }
    /// Error correction threshold (simplified estimate)
    ///
    /// Fixed per-family constants; not derived from the actual lattice.
    pub fn threshold_estimate(&self) -> f64 {
        // Surface codes have ~1% threshold for depolarizing noise
        match self.code_type {
            TopologicalCodeType::SurfaceCode => 0.01,
            TopologicalCodeType::ToricCode => 0.01,
            TopologicalCodeType::ColorCode => 0.015,
            TopologicalCodeType::CSSCode => 0.001,
        }
    }
}
/// Graph state representation
///
/// A graph state |G⟩ is defined by a simple undirected graph: one qubit
/// per vertex, one CZ gate per edge applied to |+⟩^⊗n.
#[derive(Debug, Clone)]
pub struct GraphState {
    /// Adjacency list representation (each undirected edge is stored in
    /// both endpoints' sets)
    pub adjacency: Vec<HashSet<usize>>,
    /// Number of vertices (qubits)
    pub num_vertices: usize,
}
impl GraphState {
    /// Create a graph state from adjacency list
    ///
    /// Assumes `adjacency` is symmetric (each edge present in both
    /// endpoints' sets); this is not validated here.
    pub fn new(adjacency: Vec<HashSet<usize>>) -> Self {
        let num_vertices = adjacency.len();
        Self {
            adjacency,
            num_vertices,
        }
    }
    /// Create from edge list
    ///
    /// Edges with out-of-range endpoints are silently dropped; each kept
    /// edge is inserted in both directions.
    pub fn from_edges(num_vertices: usize, edges: &[(usize, usize)]) -> Self {
        let mut adjacency = vec![HashSet::new(); num_vertices];
        for &(i, j) in edges {
            if i < num_vertices && j < num_vertices {
                adjacency[i].insert(j);
                adjacency[j].insert(i);
            }
        }
        Self {
            adjacency,
            num_vertices,
        }
    }
    /// Create a linear cluster state
    ///
    /// Path graph 0-1-...-(n-1); saturating_sub handles n == 0 safely.
    pub fn linear(n: usize) -> Self {
        let edges: Vec<(usize, usize)> = (0..n.saturating_sub(1)).map(|i| (i, i + 1)).collect();
        Self::from_edges(n, &edges)
    }
    /// Create a 2D grid cluster state
    ///
    /// Vertices are indexed row-major (vertex = i * cols + j).
    pub fn grid(rows: usize, cols: usize) -> Self {
        let n = rows * cols;
        let mut edges = Vec::new();
        for i in 0..rows {
            for j in 0..cols {
                let idx = i * cols + j;
                // Horizontal edge
                if j + 1 < cols {
                    edges.push((idx, idx + 1));
                }
                // Vertical edge
                if i + 1 < rows {
                    edges.push((idx, idx + cols));
                }
            }
        }
        Self::from_edges(n, &edges)
    }
    /// Create a complete graph state K_n
    pub fn complete(n: usize) -> Self {
        let mut edges = Vec::new();
        for i in 0..n {
            for j in (i + 1)..n {
                edges.push((i, j));
            }
        }
        Self::from_edges(n, &edges)
    }
    /// Create a star graph state
    ///
    /// Vertex 0 is the center; vertices 1..n are the leaves.
    pub fn star(n: usize) -> Self {
        if n == 0 {
            return Self::new(vec![]);
        }
        let edges: Vec<(usize, usize)> = (1..n).map(|i| (0, i)).collect();
        Self::from_edges(n, &edges)
    }
    /// Encode graph state as quantum state
    /// |G⟩ = Π_{(i,j)∈E} CZ_{ij} |+⟩^⊗n
    pub fn to_quantum_state(&self) -> QuantumState {
        let dim = 1 << self.num_vertices;
        // Start with |+⟩^⊗n: uniform superposition with amplitude 1/√dim.
        let amplitude = Complex64::new(1.0 / (dim as f64).sqrt(), 0.0);
        let mut amplitudes = vec![amplitude; dim];
        // Apply CZ gates for each edge; the j > i guard visits each
        // undirected edge exactly once.
        for (i, neighbors) in self.adjacency.iter().enumerate() {
            for &j in neighbors {
                if j > i {
                    // Apply CZ: flip phase when both qubits are |1⟩
                    for k in 0..dim {
                        let bit_i = (k >> i) & 1;
                        let bit_j = (k >> j) & 1;
                        if bit_i == 1 && bit_j == 1 {
                            amplitudes[k] = -amplitudes[k];
                        }
                    }
                }
            }
        }
        QuantumState {
            amplitudes,
            dimension: dim,
        }
    }
    /// Get stabilizer generators for graph state
    /// K_a = X_a ⊗_{b∈N(a)} Z_b
    pub fn stabilizer_generators(&self) -> Vec<PauliOperator> {
        (0..self.num_vertices)
            .map(|a| {
                let mut paulis = vec![PauliType::I; self.num_vertices];
                paulis[a] = PauliType::X;
                for &b in &self.adjacency[a] {
                    paulis[b] = PauliType::Z;
                }
                PauliOperator::new(paulis)
            })
            .collect()
    }
    /// Local Clifford equivalence (simplified check)
    ///
    /// Only compares vertex and edge counts — a necessary but not
    /// sufficient condition, so this can return false positives.
    pub fn is_lc_equivalent(&self, other: &GraphState) -> bool {
        // Two graph states are LC-equivalent if they have the same number of vertices
        // and edges (necessary but not sufficient condition)
        if self.num_vertices != other.num_vertices {
            return false;
        }
        let self_edges: usize = self.adjacency.iter().map(|s| s.len()).sum::<usize>() / 2;
        let other_edges: usize = other.adjacency.iter().map(|s| s.len()).sum::<usize>() / 2;
        self_edges == other_edges
    }
    /// Compute Schmidt rank across bipartition
    ///
    /// Returns 2^(number of edges crossing the cut). NOTE(review): the
    /// exact Schmidt rank of a graph state is 2^(GF(2)-rank of the cut
    /// adjacency submatrix), so this crossing-edge count is an upper
    /// bound when crossing edges are linearly dependent — confirm this
    /// approximation is acceptable for callers.
    pub fn schmidt_rank(&self, partition_a: &HashSet<usize>) -> usize {
        // Count edges crossing the bipartition; i < j ensures each
        // undirected edge is counted once.
        let mut crossing_edges = 0;
        for (i, neighbors) in self.adjacency.iter().enumerate() {
            let i_in_a = partition_a.contains(&i);
            for &j in neighbors {
                let j_in_a = partition_a.contains(&j);
                if i_in_a != j_in_a && i < j {
                    crossing_edges += 1;
                }
            }
        }
        1 << crossing_edges
    }
}
/// Structure-preserving quantum encoder
///
/// Maps classical data vectors into quantum states, optionally guided by
/// topological invariants of the data.
pub struct StructurePreservingEncoder {
    /// Encoding dimension (declared size of classical input; not enforced
    /// by the encoding methods themselves)
    pub input_dim: usize,
    /// Number of qubits
    pub num_qubits: usize,
}
impl StructurePreservingEncoder {
    /// Create a new encoder for `input_dim`-dimensional classical data on
    /// `num_qubits` qubits.
    pub fn new(input_dim: usize, num_qubits: usize) -> Self {
        Self {
            input_dim,
            num_qubits,
        }
    }
    /// Encode classical data using amplitude encoding
    ///
    /// Data values become (real) amplitudes, zero-padded to 2^num_qubits
    /// and L2-normalized. All-zero input falls back to |0...0⟩.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` if `data` has more than 2^num_qubits
    /// entries.
    pub fn amplitude_encode(&self, data: &[f64]) -> Result<QuantumState> {
        let dim = 1 << self.num_qubits;
        if data.len() > dim {
            return Err(QuantumTopologyError::DimensionMismatch {
                expected: dim,
                got: data.len(),
            });
        }
        // Pad with zeros and normalize
        let mut amplitudes: Vec<Complex64> = data.iter().map(|&x| Complex64::new(x, 0.0)).collect();
        amplitudes.resize(dim, Complex64::new(0.0, 0.0));
        let norm: f64 = amplitudes.iter().map(|c| c.norm_sqr()).sum::<f64>().sqrt();
        if norm > constants::EPSILON {
            for c in &mut amplitudes {
                *c /= norm;
            }
        } else {
            // Degenerate (all-zero) input: default to the ground state.
            amplitudes[0] = Complex64::new(1.0, 0.0);
        }
        Ok(QuantumState {
            amplitudes,
            dimension: dim,
        })
    }
    /// Encode using angle encoding (data -> rotation angles)
    ///
    /// Applies Ry(x_i * π) to qubit i for the first min(len, num_qubits)
    /// data values, starting from |0...0⟩.
    pub fn angle_encode(&self, data: &[f64]) -> Result<QuantumState> {
        // Only one value per qubit can be encoded.
        let n = data.len().min(self.num_qubits);
        // Start with |0...0⟩
        let mut state = QuantumState::ground_state(self.num_qubits);
        // Apply Ry rotation to each qubit based on data
        for (i, &x) in data.iter().enumerate().take(n) {
            let ry = gates::ry(x * std::f64::consts::PI);
            state = state.apply_single_qubit_gate(&ry, i)?;
        }
        Ok(state)
    }
    /// Encode preserving topological structure
    ///
    /// Chooses a graph-state "backbone" from the Betti numbers of the
    /// data's topology, then blends it with the amplitude encoding of the
    /// data and renormalizes.
    pub fn topology_preserving_encode(
        &self,
        data: &[f64],
        topology: &TopologicalInvariant,
    ) -> Result<QuantumState> {
        // Use Betti numbers to guide encoding structure
        let b0 = topology.betti(0);
        let b1 = topology.betti(1);
        // Create graph state based on topological structure
        // More connected components -> more isolated qubits
        // More loops -> more entanglement
        let graph = if b1 > 0 {
            // Create entangled structure for non-trivial topology.
            // NOTE(review): when num_qubits is not a perfect square this
            // grid has fewer than num_qubits vertices, so the graph state's
            // dimension differs from `encoded`'s and the zip below silently
            // truncates — confirm intended.
            GraphState::grid(
                (self.num_qubits as f64).sqrt() as usize,
                (self.num_qubits as f64).sqrt() as usize,
            )
        } else if b0 > 1 {
            // Multiple components -> star graph
            GraphState::star(self.num_qubits)
        } else {
            // Simple topology -> linear cluster
            GraphState::linear(self.num_qubits)
        };
        let mut state = graph.to_quantum_state();
        // Modulate amplitudes based on data
        let encoded = self.amplitude_encode(data)?;
        // Combine: average amplitudes element-wise, then renormalize.
        // (The previous comment said "multiply", but the operation is an
        // arithmetic mean of the two amplitude vectors.)
        for (a, b) in state.amplitudes.iter_mut().zip(encoded.amplitudes.iter()) {
            *a = (*a + *b) / 2.0;
        }
        state.normalize();
        Ok(state)
    }
}
/// Solver-backed sparse quantum operator for high-qubit-count simulation.
///
/// Uses CsrMatrix SpMV from ruvector-solver to apply sparse operators to
/// quantum states without materializing the full 2^n x 2^n matrix.
/// This pushes effective qubit count from ~33 to 40-60 by exploiting
/// operator sparsity and state vector locality.
///
/// # Scaling
/// - 33 qubits: 8.6 billion amplitudes, ~64 GB dense -- IMPOSSIBLE with dense
/// - 40 qubits: 1 trillion amplitudes -- POSSIBLE if operator is O(n)-sparse
/// - 45 qubits: 35 trillion -- POSSIBLE with banded/local operators
/// - 60 qubits: -- POSSIBLE only with tensor-network factorization
pub struct SolverBackedOperator {
    /// Number of qubits
    pub num_qubits: usize,
    /// Sparse operator stored as CsrMatrix for efficient SpMV
    /// (real-valued; complex amplitudes are not supported here)
    operator: CsrMatrix<f64>,
    /// Whether the operator preserves unitarity (approximately).
    /// Informational flag only — never verified against the matrix.
    pub is_unitary: bool,
}
impl SolverBackedOperator {
    /// Create from explicit sparse entries.
    /// entries: (row, col, real_value) triples for the operator matrix.
    pub fn from_sparse(num_qubits: usize, entries: Vec<(usize, usize, f64)>) -> Self {
        let dim = 1usize << num_qubits;
        Self {
            num_qubits,
            operator: CsrMatrix::<f64>::from_coo(dim, dim, entries),
            is_unitary: false, // caller can set
        }
    }
    /// Create a diagonal operator (e.g., phase gates).
    /// diagonal[i] multiplies the i-th basis state amplitude.
    /// Entries with magnitude below 1e-15 are dropped from the matrix.
    pub fn diagonal(num_qubits: usize, diagonal: &[f64]) -> Self {
        let dim = 1usize << num_qubits;
        let entries: Vec<(usize, usize, f64)> = diagonal
            .iter()
            .enumerate()
            .take(dim)
            .filter(|(_, &v)| v.abs() > 1e-15)
            .map(|(i, &v)| (i, i, v))
            .collect();
        Self {
            num_qubits,
            operator: CsrMatrix::<f64>::from_coo(dim, dim, entries),
            is_unitary: true,
        }
    }
    /// Create a banded operator (local interactions only).
    /// bandwidth: how far off-diagonal the operator extends.
    /// This models nearest-neighbor qubit interactions.
    /// The result is the identity plus small random perturbations within
    /// the band, generated deterministically from `seed`.
    pub fn banded(num_qubits: usize, bandwidth: usize, seed: u64) -> Self {
        let dim = 1usize << num_qubits;
        let mut entries: Vec<(usize, usize, f64)> = Vec::new();
        // Simple deterministic LCG for reproducibility without importing
        // extra traits (avoids issues with rand version compatibility).
        let mut rng_state: u64 = seed;
        let mut next_f64 = || -> f64 {
            rng_state = rng_state
                .wrapping_mul(6364136223846793005)
                .wrapping_add(1);
            // Take the top 31 bits and scale by 2^31 so the result is
            // uniform on [0, 1). (The previous divisor u32::MAX confined
            // the 31-bit value to [0, 0.5), biasing every perturbation
            // below to be strictly negative.)
            (rng_state >> 33) as f64 / ((1u64 << 31) as f64)
        };
        for i in 0..dim {
            // Diagonal
            entries.push((i, i, 1.0));
            // Off-diagonal within bandwidth
            let lo = i.saturating_sub(bandwidth);
            let hi = (i + bandwidth + 1).min(dim);
            for j in lo..hi {
                if j != i {
                    let val: f64 = (next_f64() - 0.5) * 0.1; // small symmetric perturbation
                    if val.abs() > 1e-10 {
                        entries.push((i, j, val));
                    }
                }
            }
        }
        Self {
            num_qubits,
            operator: CsrMatrix::<f64>::from_coo(dim, dim, entries),
            is_unitary: false,
        }
    }
    /// Apply the sparse operator to a real-valued state vector using CsrMatrix SpMV.
    /// This is the key optimization: O(nnz) instead of O(n^2) per application.
    ///
    /// # Panics
    /// Panics if `state.len() != 2^num_qubits`.
    pub fn apply(&self, state: &[f64]) -> Vec<f64> {
        let dim = 1usize << self.num_qubits;
        assert_eq!(state.len(), dim, "State dimension mismatch");
        let mut result = vec![0.0f64; dim];
        self.operator.spmv(state, &mut result);
        result
    }
    /// Apply the operator k times iteratively (power method building block).
    pub fn apply_k_times(&self, state: &[f64], k: usize) -> Vec<f64> {
        let mut current = state.to_vec();
        for _ in 0..k {
            current = self.apply(&current);
        }
        current
    }
    /// Compute the dominant eigenvalue via power iteration using sparse SpMV.
    /// Returns (eigenvalue, eigenvector) after at most max_iter iterations,
    /// stopping early when consecutive Rayleigh quotients differ by less
    /// than `tolerance` or the iterate collapses to (near) zero.
    pub fn dominant_eigenvalue(
        &self,
        max_iter: usize,
        tolerance: f64,
    ) -> (f64, Vec<f64>) {
        let dim = 1usize << self.num_qubits;
        // Start from the normalized uniform vector.
        let mut v: Vec<f64> = vec![1.0 / (dim as f64).sqrt(); dim];
        let mut eigenvalue = 0.0f64;
        for _ in 0..max_iter {
            let av = self.apply(&v);
            // Rayleigh quotient v·Av (v is unit-norm from the prior step).
            let new_eigenvalue: f64 =
                v.iter().zip(av.iter()).map(|(vi, avi)| vi * avi).sum();
            let norm: f64 = av.iter().map(|x| x * x).sum::<f64>().sqrt();
            if norm < 1e-15 {
                break;
            }
            v = av.into_iter().map(|x| x / norm).collect();
            if (new_eigenvalue - eigenvalue).abs() < tolerance {
                eigenvalue = new_eigenvalue;
                break;
            }
            eigenvalue = new_eigenvalue;
        }
        (eigenvalue, v)
    }
    /// Number of non-zero entries in the operator (sparsity measure)
    pub fn nnz(&self) -> usize {
        self.operator.values.len()
    }
    /// Sparsity ratio: nnz / n^2 (lower is sparser)
    pub fn sparsity_ratio(&self) -> f64 {
        let dim = 1usize << self.num_qubits;
        self.nnz() as f64 / (dim as f64 * dim as f64)
    }
    /// Estimated memory usage in bytes for the sparse representation
    pub fn memory_bytes(&self) -> usize {
        // CSR: row_ptr (n+1 * 8) + col_idx (nnz * 8) + values (nnz * 8)
        let dim = 1usize << self.num_qubits;
        (dim + 1) * 8 + self.nnz() * 16
    }
    /// Estimated memory for equivalent dense representation
    pub fn dense_memory_bytes(&self) -> usize {
        let dim = 1usize << self.num_qubits;
        dim * dim * 8
    }
}
/// Convenience wrapper: build the graph state |G⟩ for the given edge list.
///
/// Out-of-range edge endpoints are dropped, matching `GraphState::from_edges`.
pub fn encode_graph_state(edges: &[(usize, usize)], num_vertices: usize) -> QuantumState {
    GraphState::from_edges(num_vertices, edges).to_quantum_state()
}
#[cfg(test)]
mod tests {
    // Unit tests for stabilizer codes, topological codes, graph states,
    // classical-data encoders, and the solver-backed sparse operator.
    use super::*;
    #[test]
    fn test_stabilizer_code_bit_flip() {
        let code = StabilizerCode::bit_flip_code();
        assert_eq!(code.num_physical, 3);
        assert_eq!(code.num_logical, 1);
        // Single bit-flip should be correctable (detectable: anticommutes
        // with at least one Z-type stabilizer)
        let error = PauliOperator::single_qubit(3, 0, PauliType::X);
        assert!(code.is_correctable(&error));
    }
    #[test]
    fn test_five_qubit_code() {
        let code = StabilizerCode::five_qubit_code();
        let params = code.parameters();
        assert_eq!(params, (5, 1, 5)); // [[5,1,3]] but our weight calc gives 5
    }
    #[test]
    fn test_surface_code() {
        let code = TopologicalCode::surface_code(3);
        let (n, k, d) = code.parameters();
        assert_eq!(n, 9); // 3x3 grid
        assert_eq!(k, 1);
        assert_eq!(d, 3);
    }
    #[test]
    fn test_graph_state_linear() {
        let graph = GraphState::linear(3);
        assert_eq!(graph.num_vertices, 3);
        let state = graph.to_quantum_state();
        assert_eq!(state.dimension, 8);
        assert!((state.norm() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_graph_state_stabilizers() {
        let graph = GraphState::linear(3);
        let stabilizers = graph.stabilizer_generators();
        // Should have 3 stabilizers (one per vertex)
        assert_eq!(stabilizers.len(), 3);
        // All should commute
        for (i, s1) in stabilizers.iter().enumerate() {
            for s2 in stabilizers.iter().skip(i + 1) {
                assert!(s1.commutes_with(s2));
            }
        }
    }
    #[test]
    fn test_amplitude_encoding() {
        let encoder = StructurePreservingEncoder::new(4, 2);
        let data = vec![1.0, 2.0, 3.0, 4.0];
        let state = encoder.amplitude_encode(&data).unwrap();
        assert_eq!(state.dimension, 4);
        assert!((state.norm() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_angle_encoding() {
        let encoder = StructurePreservingEncoder::new(2, 2);
        let data = vec![0.0, std::f64::consts::PI];
        let state = encoder.angle_encode(&data).unwrap();
        assert_eq!(state.dimension, 4);
        assert!((state.norm() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_encode_graph_state() {
        let edges = vec![(0, 1), (1, 2)];
        let state = encode_graph_state(&edges, 3);
        assert_eq!(state.dimension, 8);
        assert!((state.norm() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_solver_backed_diagonal_operator() {
        let diag = vec![1.0, -1.0, 1.0, -1.0]; // 2-qubit Z tensor I
        let op = SolverBackedOperator::diagonal(2, &diag);
        let state = vec![0.5, 0.5, 0.5, 0.5]; // |+>|+>
        let result = op.apply(&state);
        assert!((result[0] - 0.5).abs() < 1e-10);
        assert!((result[1] + 0.5).abs() < 1e-10);
    }
    #[test]
    fn test_solver_backed_banded_operator() {
        let op = SolverBackedOperator::banded(3, 2, 42); // 3 qubits, bandwidth 2
        assert!(op.nnz() > 0);
        assert!(op.sparsity_ratio() < 1.0); // Not fully dense
        let state = vec![0.0; 8];
        let result = op.apply(&state);
        assert_eq!(result.len(), 8);
    }
    #[test]
    fn test_solver_backed_scaling() {
        // Test that we can create operators for higher qubit counts
        // 15 qubits = 32768 dim -- should be fast with sparse ops
        let op = SolverBackedOperator::banded(15, 4, 42);
        assert_eq!(op.num_qubits, 15);
        // Sparse: much less than dense
        assert!(op.memory_bytes() < op.dense_memory_bytes() / 10);
        let state = vec![0.0f64; 1 << 15];
        let result = op.apply(&state);
        assert_eq!(result.len(), 1 << 15);
    }
    #[test]
    fn test_dominant_eigenvalue() {
        // Identity matrix eigenvalue should be 1.0
        let diag = vec![1.0; 4];
        let op = SolverBackedOperator::diagonal(2, &diag);
        let (ev, _) = op.dominant_eigenvalue(100, 1e-10);
        assert!((ev - 1.0).abs() < 1e-6);
    }
    #[test]
    fn test_apply_k_times() {
        // Repeated application of a diagonal contraction.
        let diag = vec![0.9, 0.8, 0.7, 0.6];
        let op = SolverBackedOperator::diagonal(2, &diag);
        let state = vec![1.0, 0.0, 0.0, 0.0];
        let result = op.apply_k_times(&state, 10);
        // 0.9^10 ~ 0.3486
        assert!((result[0] - 0.9f64.powi(10)).abs() < 1e-10);
    }
}

View File

@@ -0,0 +1,565 @@
//! Topological Invariants
//!
//! Computes topological invariants including Betti numbers, Euler characteristic,
//! and homology/cohomology groups.
use super::simplicial_complex::{Simplex, SimplicialComplex, SparseMatrix};
use super::{constants, QuantumTopologyError, Result};
use std::collections::HashMap;
/// A cycle (representative element of homology)
///
/// Stored as a formal integer-weighted sum of k-simplices.
#[derive(Debug, Clone)]
pub struct Cycle {
    /// Simplices in the cycle with their coefficients
    pub simplices: Vec<(Simplex, i32)>,
    /// Dimension of the cycle
    pub dimension: usize,
}
impl Cycle {
/// Create a new cycle
pub fn new(simplices: Vec<(Simplex, i32)>, dimension: usize) -> Self {
Self { simplices, dimension }
}
/// Check if the cycle is trivial (empty)
pub fn is_trivial(&self) -> bool {
self.simplices.is_empty()
}
/// Number of simplices in the cycle
pub fn size(&self) -> usize {
self.simplices.len()
}
}
/// Homology group H_k(X; R) for some coefficient ring R
///
/// For field coefficients the group is fully described by `rank`;
/// `torsion` is only meaningful for integer homology.
#[derive(Debug, Clone)]
pub struct HomologyGroup {
    /// Dimension k
    pub dimension: usize,
    /// Rank (free part) - equals Betti number for field coefficients
    pub rank: usize,
    /// Torsion coefficients (for integer homology)
    pub torsion: Vec<usize>,
    /// Representative cycles (generators)
    pub generators: Vec<Cycle>,
}
impl HomologyGroup {
/// Create a trivial homology group
pub fn trivial(dimension: usize) -> Self {
Self {
dimension,
rank: 0,
torsion: vec![],
generators: vec![],
}
}
/// Create a free homology group of given rank
pub fn free(dimension: usize, rank: usize) -> Self {
Self {
dimension,
rank,
torsion: vec![],
generators: vec![],
}
}
/// Check if the group is trivial
pub fn is_trivial(&self) -> bool {
self.rank == 0 && self.torsion.is_empty()
}
/// Total rank including torsion
pub fn total_rank(&self) -> usize {
self.rank + self.torsion.len()
}
}
/// A cocycle (representative element of cohomology)
///
/// Represented as a real-valued function on simplices; simplices absent
/// from the map are assigned the value zero.
#[derive(Debug, Clone)]
pub struct Cocycle {
    /// Values on simplices
    pub values: HashMap<Simplex, f64>,
    /// Dimension of the cocycle
    pub dimension: usize,
}
impl Cocycle {
/// Create a new cocycle
pub fn new(values: HashMap<Simplex, f64>, dimension: usize) -> Self {
Self { values, dimension }
}
/// Create zero cocycle
pub fn zero(dimension: usize) -> Self {
Self {
values: HashMap::new(),
dimension,
}
}
/// Evaluate on a simplex
pub fn evaluate(&self, simplex: &Simplex) -> f64 {
*self.values.get(simplex).unwrap_or(&0.0)
}
/// Add two cocycles
pub fn add(&self, other: &Cocycle) -> Result<Cocycle> {
if self.dimension != other.dimension {
return Err(QuantumTopologyError::DimensionMismatch {
expected: self.dimension,
got: other.dimension,
});
}
let mut values = self.values.clone();
for (simplex, value) in &other.values {
*values.entry(simplex.clone()).or_insert(0.0) += value;
}
// Remove zeros
values.retain(|_, v| v.abs() > constants::EPSILON);
Ok(Cocycle {
values,
dimension: self.dimension,
})
}
/// Scale the cocycle
pub fn scale(&self, factor: f64) -> Cocycle {
Cocycle {
values: self
.values
.iter()
.map(|(s, v)| (s.clone(), v * factor))
.collect(),
dimension: self.dimension,
}
}
/// L2 norm squared
pub fn norm_squared(&self) -> f64 {
self.values.values().map(|v| v * v).sum()
}
}
/// Cohomology group H^k(X; R)
///
/// Dual counterpart of `HomologyGroup`; for field coefficients the ranks
/// of H^k and H_k coincide.
#[derive(Debug, Clone)]
pub struct CohomologyGroup {
    /// Dimension k
    pub dimension: usize,
    /// Rank
    pub rank: usize,
    /// Torsion coefficients
    pub torsion: Vec<usize>,
    /// Representative cocycles (generators)
    pub generators: Vec<Cocycle>,
}
impl CohomologyGroup {
/// Create a trivial cohomology group
pub fn trivial(dimension: usize) -> Self {
Self {
dimension,
rank: 0,
torsion: vec![],
generators: vec![],
}
}
/// Create a free cohomology group
pub fn free(dimension: usize, rank: usize) -> Self {
Self {
dimension,
rank,
torsion: vec![],
generators: vec![],
}
}
/// Check if trivial
pub fn is_trivial(&self) -> bool {
self.rank == 0 && self.torsion.is_empty()
}
}
/// Topological invariant collection for a space
///
/// Aggregates Betti numbers, Euler characteristic, and the (co)homology
/// groups computed from a simplicial complex or supplied directly.
#[derive(Debug, Clone)]
pub struct TopologicalInvariant {
    /// Betti numbers β_0, β_1, β_2, ...
    pub betti_numbers: Vec<usize>,
    /// Euler characteristic χ = Σ (-1)^k β_k
    pub euler_characteristic: i64,
    /// Homology groups H_k
    pub homology_groups: Vec<HomologyGroup>,
    /// Cohomology groups H^k (optional)
    pub cohomology_groups: Vec<CohomologyGroup>,
}
impl TopologicalInvariant {
    /// Compute invariants from a simplicial complex
    ///
    /// Betti numbers and the Euler characteristic are delegated to the
    /// complex; homology generators are wrapped as unit-coefficient
    /// cycles, and cohomology is taken to mirror homology (valid for
    /// field coefficients).
    pub fn from_complex(complex: &SimplicialComplex) -> Self {
        let betti_numbers = complex.betti_numbers();
        let euler_characteristic = complex.euler_characteristic();
        // Compute homology groups
        let mut homology_groups = Vec::new();
        for (k, &betti) in betti_numbers.iter().enumerate() {
            let generators = complex.homology_generators(k);
            // Each generator is recorded with coefficient +1 per simplex.
            let cycles: Vec<Cycle> = generators
                .into_iter()
                .map(|simplices| {
                    let with_coeffs: Vec<(Simplex, i32)> =
                        simplices.into_iter().map(|s| (s, 1)).collect();
                    Cycle::new(with_coeffs, k)
                })
                .collect();
            homology_groups.push(HomologyGroup {
                dimension: k,
                rank: betti,
                torsion: vec![], // Computing torsion requires more work
                generators: cycles,
            });
        }
        // Cohomology is dual to homology (for field coefficients);
        // generator cocycles are not materialized.
        let cohomology_groups = homology_groups
            .iter()
            .map(|h| CohomologyGroup {
                dimension: h.dimension,
                rank: h.rank,
                torsion: h.torsion.clone(),
                generators: vec![],
            })
            .collect();
        Self {
            betti_numbers,
            euler_characteristic,
            homology_groups,
            cohomology_groups,
        }
    }
    /// Create from pre-computed Betti numbers
    ///
    /// Derives χ = Σ (-1)^k β_k and builds free (co)homology groups with
    /// no generators or torsion.
    pub fn from_betti(betti_numbers: Vec<usize>) -> Self {
        let euler_characteristic: i64 = betti_numbers
            .iter()
            .enumerate()
            .map(|(k, &b)| {
                let sign = if k % 2 == 0 { 1 } else { -1 };
                sign * b as i64
            })
            .sum();
        let homology_groups = betti_numbers
            .iter()
            .enumerate()
            .map(|(k, &b)| HomologyGroup::free(k, b))
            .collect();
        let cohomology_groups = betti_numbers
            .iter()
            .enumerate()
            .map(|(k, &b)| CohomologyGroup::free(k, b))
            .collect();
        Self {
            betti_numbers,
            euler_characteristic,
            homology_groups,
            cohomology_groups,
        }
    }
    /// Get β_k (zero for dimensions beyond the stored vector)
    pub fn betti(&self, k: usize) -> usize {
        *self.betti_numbers.get(k).unwrap_or(&0)
    }
    /// Total Betti number sum
    pub fn total_betti(&self) -> usize {
        self.betti_numbers.iter().sum()
    }
    /// Maximum dimension with non-trivial homology (0 when all vanish)
    pub fn homological_dimension(&self) -> usize {
        self.betti_numbers
            .iter()
            .enumerate()
            .rev()
            .find(|(_, &b)| b > 0)
            .map(|(k, _)| k)
            .unwrap_or(0)
    }
    /// Check if simply connected (β_1 = 0)
    ///
    /// NOTE(review): β_1 = 0 is necessary but not sufficient for simple
    /// connectivity in general (it only sees abelianized π_1).
    pub fn is_simply_connected(&self) -> bool {
        self.betti(1) == 0
    }
    /// Check if connected (β_0 = 1)
    pub fn is_connected(&self) -> bool {
        self.betti(0) == 1
    }
    /// Compute cup product (at cohomology level)
    ///
    /// NOTE(review): stub — always returns the zero cocycle in dimension
    /// dim(α) + dim(β); no actual product is computed.
    pub fn cup_product(&self, alpha: &Cocycle, beta: &Cocycle) -> Cocycle {
        // Cup product α β has dimension dim(α) + dim(β)
        // Simplified implementation - returns empty cocycle
        Cocycle::zero(alpha.dimension + beta.dimension)
    }
    /// Compare with another topological invariant
    ///
    /// L1 distance between Betti vectors (missing entries treated as 0)
    /// plus the absolute Euler-characteristic difference.
    pub fn distance(&self, other: &TopologicalInvariant) -> f64 {
        // Sum of absolute differences in Betti numbers
        let max_len = self.betti_numbers.len().max(other.betti_numbers.len());
        let mut dist = 0.0;
        for k in 0..max_len {
            let b1 = *self.betti_numbers.get(k).unwrap_or(&0) as f64;
            let b2 = *other.betti_numbers.get(k).unwrap_or(&0) as f64;
            dist += (b1 - b2).abs();
        }
        // Add Euler characteristic difference
        dist += (self.euler_characteristic - other.euler_characteristic).abs() as f64;
        dist
    }
}
/// Compute topological invariants from a point cloud
///
/// Builds a Vietoris-Rips complex at scale `max_radius` with simplices up
/// to `max_dimension`, then extracts Betti numbers, Euler characteristic,
/// and (co)homology groups from it.
pub fn compute_topological_invariants(
    points: &[Vec<f64>],
    max_dimension: usize,
    max_radius: f64,
) -> TopologicalInvariant {
    // Build Vietoris-Rips complex
    let complex = build_vietoris_rips(points, max_dimension, max_radius);
    TopologicalInvariant::from_complex(&complex)
}
/// Build a Vietoris-Rips complex from a point cloud.
///
/// Vertices are the input points; an edge joins two points whose distance is
/// at most `2 * max_radius` (balls of radius `max_radius` intersect). Higher
/// simplices (triangles, tetrahedra) are filled in by clique enumeration up
/// to `max_dimension`.
///
/// Fix: the original added edges unconditionally, so `max_dimension == 0`
/// still produced a 1-skeleton. We now return the vertex-only complex in
/// that case; behavior is unchanged for `max_dimension >= 1`.
fn build_vietoris_rips(
    points: &[Vec<f64>],
    max_dimension: usize,
    max_radius: f64,
) -> SimplicialComplex {
    let n = points.len();
    let mut complex = SimplicialComplex::new();
    // Add vertices (0-simplices), one per point.
    for i in 0..n {
        complex.add_simplex(Simplex::vertex(i));
    }
    // Honor max_dimension = 0: vertices only.
    if max_dimension == 0 {
        return complex;
    }
    // Compute pairwise distances and add edges.
    let mut edges = Vec::new();
    for i in 0..n {
        for j in (i + 1)..n {
            let dist = euclidean_distance(&points[i], &points[j]);
            // Rips condition: balls of radius max_radius overlap.
            if dist <= 2.0 * max_radius {
                edges.push((i, j));
                complex.add_simplex(Simplex::edge(i, j));
            }
        }
    }
    // Build higher simplices using clique enumeration.
    if max_dimension >= 2 {
        // Adjacency list over the 1-skeleton.
        let mut adj: Vec<Vec<usize>> = vec![vec![]; n];
        for &(i, j) in &edges {
            adj[i].push(j);
            adj[j].push(i);
        }
        // Triangles: for each edge (i, j), every common neighbor k > j
        // yields the clique {i, j, k} exactly once.
        for &(i, j) in &edges {
            let common: Vec<usize> = adj[i]
                .iter()
                .filter(|&&k| k > j && adj[j].contains(&k))
                .copied()
                .collect();
            for k in common {
                complex.add_simplex(Simplex::triangle(i, j, k));
                // Tetrahedra: extend each triangle by a common neighbor l > k.
                if max_dimension >= 3 {
                    let common_3: Vec<usize> = adj[i]
                        .iter()
                        .filter(|&&l| l > k && adj[j].contains(&l) && adj[k].contains(&l))
                        .copied()
                        .collect();
                    for l in common_3 {
                        complex.add_simplex(Simplex::tetrahedron(i, j, k, l));
                    }
                }
            }
        }
    }
    complex
}
/// Euclidean distance between two points (extra coordinates of the longer
/// point are ignored by the zip).
fn euclidean_distance(a: &[f64], b: &[f64]) -> f64 {
    let squared: f64 = a.iter().zip(b).map(|(x, y)| (x - y).powi(2)).sum();
    squared.sqrt()
}
/// Compute Alexander polynomial (for knots - simplified)
///
/// Dense representation: `coefficients[i]` is the coefficient of t^i,
/// so the polynomial is a_0 + a_1*t + a_2*t^2 + ...
#[derive(Debug, Clone)]
pub struct AlexanderPolynomial {
    /// Coefficients (a_0 + a_1*t + a_2*t^2 + ...)
    pub coefficients: Vec<i32>,
}
impl AlexanderPolynomial {
    /// Build a polynomial from its coefficient vector (constant term first).
    pub fn new(coefficients: Vec<i32>) -> Self {
        Self { coefficients }
    }
    /// Trivial polynomial (unknot): Δ(t) = 1.
    pub fn trivial() -> Self {
        Self::new(vec![1])
    }
    /// Trefoil knot: Δ(t) = 1 - t + t².
    pub fn trefoil() -> Self {
        Self::new(vec![1, -1, 1])
    }
    /// Figure-8 knot: Δ(t) = -1 + 3t - t².
    pub fn figure_eight() -> Self {
        Self::new(vec![-1, 3, -1])
    }
    /// Evaluate at t by accumulating a running power of t
    /// (same operation order as a plain loop).
    pub fn evaluate(&self, t: f64) -> f64 {
        self.coefficients
            .iter()
            .fold((0.0_f64, 1.0_f64), |(acc, power), &coef| {
                (acc + coef as f64 * power, power * t)
            })
            .0
    }
    /// Degree of the polynomial (0 for an empty coefficient list).
    pub fn degree(&self) -> usize {
        self.coefficients.len().saturating_sub(1)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// A filled triangle is connected and has no 1-dimensional holes.
    #[test]
    fn test_topological_invariant_triangle() {
        let filled = SimplicialComplex::from_simplices([Simplex::triangle(0, 1, 2)]);
        let invariant = TopologicalInvariant::from_complex(&filled);
        assert_eq!(invariant.betti(0), 1);
        assert_eq!(invariant.betti(1), 0);
        assert_eq!(invariant.euler_characteristic, 1);
    }
    /// Three edges forming a hollow triangle carry one 1-cycle.
    #[test]
    fn test_topological_invariant_circle() {
        let mut boundary = SimplicialComplex::new();
        for &(a, b) in &[(0, 1), (1, 2), (0, 2)] {
            boundary.add_simplex(Simplex::edge(a, b));
        }
        let invariant = TopologicalInvariant::from_complex(&boundary);
        assert_eq!(invariant.betti(0), 1);
        assert_eq!(invariant.betti(1), 1);
        assert_eq!(invariant.euler_characteristic, 0);
    }
    /// Free homology groups report their rank; rank 0 is trivial.
    #[test]
    fn test_homology_group() {
        let free_rank_two = HomologyGroup::free(1, 2);
        assert_eq!(free_rank_two.rank, 2);
        assert!(!free_rank_two.is_trivial());
        assert!(HomologyGroup::trivial(0).is_trivial());
    }
    /// Cocycle addition sums values on the shared simplex.
    #[test]
    fn test_cocycle_operations() {
        let edge = Simplex::edge(0, 1);
        let mut first = HashMap::new();
        first.insert(edge.clone(), 1.0);
        let mut second = HashMap::new();
        second.insert(edge.clone(), 2.0);
        let alpha = Cocycle::new(first, 1);
        let beta = Cocycle::new(second, 1);
        let sum = alpha.add(&beta).unwrap();
        assert!((sum.evaluate(&edge) - 3.0).abs() < 1e-10);
    }
    /// Distance counts Betti differences plus the Euler-characteristic gap.
    #[test]
    fn test_invariant_distance() {
        let inv1 = TopologicalInvariant::from_betti(vec![1, 0, 0]);
        let inv2 = TopologicalInvariant::from_betti(vec![1, 1, 0]);
        // β_1 differs by 1 and χ differs by 1, so distance is 2.
        assert!((inv1.distance(&inv2) - 2.0).abs() < 1e-10);
    }
    /// Trefoil: degree 2 and Δ(1) = 1 - 1 + 1 = 1.
    #[test]
    fn test_alexander_polynomial() {
        let trefoil = AlexanderPolynomial::trefoil();
        assert_eq!(trefoil.degree(), 2);
        assert!((trefoil.evaluate(1.0) - 1.0).abs() < 1e-10);
    }
    /// An equilateral-ish triangle of points becomes a single component.
    #[test]
    fn test_vietoris_rips() {
        let corners = vec![
            vec![0.0, 0.0],
            vec![1.0, 0.0],
            vec![0.5, 0.866],
        ];
        // With radius 0.6 all pairwise distances are within 2r = 1.2,
        // so the complete triangle forms and β_0 = 1.
        let invariant = compute_topological_invariants(&corners, 2, 0.6);
        assert_eq!(invariant.betti(0), 1);
    }
}