Squashed 'vendor/ruvector/' content from commit b64c2172

git-subtree-dir: vendor/ruvector
git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
commit d803bfe2b1
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,472 @@
//! Unified simulation backend trait and automatic backend selection.
//!
//! ruqu-core supports multiple simulation backends, each optimal for
//! different circuit structures:
//!
//! | Backend | Qubits | Best for |
//! |---------|--------|----------|
//! | StateVector | up to ~32 | General circuits, exact simulation |
//! | Stabilizer | millions | Clifford circuits + measurement |
//! | TensorNetwork | hundreds-thousands | Low-depth, local connectivity |
use crate::circuit::QuantumCircuit;
use crate::gate::Gate;
// ---------------------------------------------------------------------------
// Backend type enum
// ---------------------------------------------------------------------------
/// Which backend to use for simulation.
///
/// Either chosen explicitly by the caller or produced by circuit analysis
/// (see [`analyze_circuit`]); `Auto` defers the decision to that analysis.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BackendType {
    /// Dense state-vector (exact, up to ~32 qubits).
    StateVector,
    /// Aaronson-Gottesman stabilizer tableau (Clifford-only, millions of qubits).
    Stabilizer,
    /// Matrix Product State tensor network (bounded entanglement, hundreds+).
    TensorNetwork,
    /// Clifford+T stabilizer rank decomposition (moderate T-count, many qubits).
    CliffordT,
    /// Automatically select the best backend based on circuit analysis.
    Auto,
}
// ---------------------------------------------------------------------------
// Circuit analysis result
// ---------------------------------------------------------------------------
/// Result of circuit analysis, used for backend selection.
///
/// Produced by [`analyze_circuit`] and contains both raw statistics about the
/// circuit (gate counts, depth, connectivity) and a recommended backend with
/// a confidence score and human-readable explanation.
#[derive(Debug, Clone)]
pub struct CircuitAnalysis {
    /// Number of qubits in the circuit.
    pub num_qubits: u32,
    /// Total number of gates, including measurements/resets/barriers.
    pub total_gates: usize,
    /// Number of Clifford gates (H, S, CNOT, CZ, SWAP, X, Y, Z, Sdg).
    pub clifford_gates: usize,
    /// Number of non-Clifford gates (T, Tdg, Rx, Ry, Rz, Phase, Rzz, Unitary1Q).
    pub non_clifford_gates: usize,
    /// Fraction of unitary gates that are Clifford (0.0 to 1.0).
    /// Defined as 1.0 when the circuit has no unitary gates at all.
    pub clifford_fraction: f64,
    /// Number of measurement gates.
    pub measurement_gates: usize,
    /// Circuit depth (longest qubit timeline).
    pub depth: u32,
    /// Maximum qubit distance |q0 - q1| in any two-qubit gate (0 if none).
    pub max_connectivity: u32,
    /// Whether all two-qubit gates are between adjacent qubits.
    pub is_nearest_neighbor: bool,
    /// Recommended backend based on the analysis heuristics.
    pub recommended_backend: BackendType,
    /// Confidence in the recommendation (0.0 to 1.0).
    pub confidence: f64,
    /// Human-readable explanation of the recommendation.
    pub explanation: String,
}
// ---------------------------------------------------------------------------
// Public analysis entry point
// ---------------------------------------------------------------------------
/// Analyze a quantum circuit to determine the optimal simulation backend.
///
/// Walks the gate list once to collect statistics, then applies a series of
/// heuristic rules to recommend a [`BackendType`]. The returned
/// [`CircuitAnalysis`] contains both the raw numbers and the recommendation.
///
/// # Example
///
/// ```
/// use ruqu_core::circuit::QuantumCircuit;
/// use ruqu_core::backend::{analyze_circuit, BackendType};
///
/// // A small circuit with a non-Clifford gate routes to StateVector.
/// let mut circ = QuantumCircuit::new(3);
/// circ.h(0).t(1).cnot(0, 1);
/// let analysis = analyze_circuit(&circ);
/// assert_eq!(analysis.recommended_backend, BackendType::StateVector);
/// ```
pub fn analyze_circuit(circuit: &QuantumCircuit) -> CircuitAnalysis {
    let num_qubits = circuit.num_qubits();
    let gates = circuit.gates();
    let total_gates = gates.len();
    let mut clifford_gates = 0usize;
    let mut non_clifford_gates = 0usize;
    let mut measurement_gates = 0usize;
    let mut max_connectivity: u32 = 0;
    let mut is_nearest_neighbor = true;
    for gate in gates {
        match gate {
            // Clifford gates
            Gate::H(_)
            | Gate::X(_)
            | Gate::Y(_)
            | Gate::Z(_)
            | Gate::S(_)
            | Gate::Sdg(_)
            | Gate::CNOT(_, _)
            | Gate::CZ(_, _)
            | Gate::SWAP(_, _) => {
                clifford_gates += 1;
            }
            // Non-Clifford gates
            Gate::T(_)
            | Gate::Tdg(_)
            | Gate::Rx(_, _)
            | Gate::Ry(_, _)
            | Gate::Rz(_, _)
            | Gate::Phase(_, _)
            | Gate::Rzz(_, _, _)
            | Gate::Unitary1Q(_, _) => {
                non_clifford_gates += 1;
            }
            Gate::Measure(_) => {
                measurement_gates += 1;
            }
            // Resets and barriers are neither Clifford nor non-Clifford.
            Gate::Reset(_) | Gate::Barrier => {}
        }
        // Check connectivity for two-qubit gates.
        let qubits = gate.qubits();
        if qubits.len() == 2 {
            // |q0 - q1| without a manual branch; `abs_diff` cannot underflow.
            let dist = qubits[0].abs_diff(qubits[1]);
            max_connectivity = max_connectivity.max(dist);
            if dist > 1 {
                is_nearest_neighbor = false;
            }
        }
    }
    // Clifford fraction over unitary gates only; an empty (or purely
    // non-unitary) circuit counts as fully Clifford.
    let unitary_gates = clifford_gates + non_clifford_gates;
    let clifford_fraction = if unitary_gates > 0 {
        clifford_gates as f64 / unitary_gates as f64
    } else {
        1.0
    };
    let depth = circuit.depth();
    // Decide which backend fits best.
    let (recommended_backend, confidence, explanation) = select_backend(
        num_qubits,
        clifford_fraction,
        non_clifford_gates,
        depth,
        is_nearest_neighbor,
        max_connectivity,
    );
    CircuitAnalysis {
        num_qubits,
        total_gates,
        clifford_gates,
        non_clifford_gates,
        clifford_fraction,
        measurement_gates,
        depth,
        max_connectivity,
        is_nearest_neighbor,
        recommended_backend,
        confidence,
        explanation,
    }
}
// ---------------------------------------------------------------------------
// Internal selection heuristics
// ---------------------------------------------------------------------------
/// Internal backend selection logic.
///
/// Returns `(backend, confidence, explanation)` based on a priority-ordered
/// set of heuristic rules: the conditions below are evaluated top-down and
/// the first one that holds decides the result.
fn select_backend(
    num_qubits: u32,
    clifford_fraction: f64,
    non_clifford_gates: usize,
    depth: u32,
    is_nearest_neighbor: bool,
    max_connectivity: u32,
) -> (BackendType, f64, String) {
    if clifford_fraction >= 1.0 {
        // Rule 1: pure Clifford circuits -> stabilizer, at any size.
        let why = format!(
            "Pure Clifford circuit: stabilizer backend handles {} qubits in O(n^2) per gate",
            num_qubits
        );
        (BackendType::Stabilizer, 0.99, why)
    } else if clifford_fraction >= 0.95 && num_qubits > 32 && non_clifford_gates <= 10 {
        // Rule 2: mostly Clifford, a handful of non-Clifford gates, and too
        // wide for a state vector -> stabilizer with decomposition.
        let why = format!(
            "{}% Clifford with only {} non-Clifford gates: \
             stabilizer backend recommended for {} qubits",
            (clifford_fraction * 100.0) as u32,
            non_clifford_gates,
            num_qubits
        );
        (BackendType::Stabilizer, 0.85, why)
    } else if num_qubits <= 25 {
        // Rule 3: small enough for an exact state vector with headroom.
        let why = format!(
            "{} qubits fits comfortably in state vector ({})",
            num_qubits,
            format_memory(num_qubits)
        );
        (BackendType::StateVector, 0.95, why)
    } else if num_qubits <= 32 {
        // Rule 4: state vector still possible, but memory is tight.
        let why = format!(
            "{} qubits requires {} for state vector - verify available memory",
            num_qubits,
            format_memory(num_qubits)
        );
        (BackendType::StateVector, 0.80, why)
    } else if is_nearest_neighbor && depth < num_qubits * 2 {
        // Rule 5: shallow circuit with local connectivity -> tensor network.
        let why = format!(
            "Nearest-neighbor connectivity with depth {} on {} qubits: \
             tensor network efficient",
            depth, num_qubits
        );
        (BackendType::TensorNetwork, 0.85, why)
    } else if num_qubits > 32 {
        // Rule 6: large general circuit -> tensor network, approximate; the
        // confidence drops when connectivity is non-local.
        let confidence = if is_nearest_neighbor { 0.75 } else { 0.55 };
        let why = format!(
            "{} qubits exceeds state vector capacity. \
             Tensor network with connectivity {} - results are approximate",
            num_qubits, max_connectivity
        );
        (BackendType::TensorNetwork, confidence, why)
    } else {
        // Fallback: exact state vector simulation.
        (
            BackendType::StateVector,
            0.70,
            "Default to exact state vector simulation".into(),
        )
    }
}
// ---------------------------------------------------------------------------
// Memory formatting helper
// ---------------------------------------------------------------------------
/// Format the state-vector memory requirement for a given qubit count.
///
/// Each amplitude is a `Complex` (16 bytes), and there are `2^n` of them,
/// so the requirement is `2^(n+4)` bytes. For `num_qubits >= 124` that
/// exceeds `u128`; the value saturates at `u128::MAX` instead of silently
/// shifting bits out (the old `(1u128 << n) * 16` produced "0 bytes" for
/// 124..=127 qubits).
fn format_memory(num_qubits: u32) -> String {
    // 2^n amplitudes x 16 bytes each, saturating on overflow.
    let bytes = 1u128
        .checked_shl(num_qubits)
        .and_then(|amps| amps.checked_mul(16))
        .unwrap_or(u128::MAX);
    if bytes >= 1u128 << 40 {
        format!("{:.1} TiB", bytes as f64 / (1u128 << 40) as f64)
    } else if bytes >= 1u128 << 30 {
        format!("{:.1} GiB", bytes as f64 / (1u128 << 30) as f64)
    } else if bytes >= 1u128 << 20 {
        format!("{:.1} MiB", bytes as f64 / (1u128 << 20) as f64)
    } else {
        format!("{} bytes", bytes)
    }
}
// ---------------------------------------------------------------------------
// Scaling information
// ---------------------------------------------------------------------------
/// Scaling characteristics for a single simulation backend.
///
/// One entry per backend is returned by [`scaling_report`]. The qubit
/// limits and complexity strings are static, documentation-style values,
/// not measured at run time.
#[derive(Debug, Clone)]
pub struct ScalingInfo {
    /// The backend this info describes.
    pub backend: BackendType,
    /// Maximum qubits for exact (zero-error) simulation.
    pub max_qubits_exact: u32,
    /// Maximum qubits for approximate simulation with truncation.
    pub max_qubits_approximate: u32,
    /// Time complexity in big-O notation (human-readable string).
    pub time_complexity: String,
    /// Space complexity in big-O notation (human-readable string).
    pub space_complexity: String,
}
/// Get scaling information for all supported backends.
///
/// Returns a `Vec` with one [`ScalingInfo`] per backend (StateVector,
/// Stabilizer, TensorNetwork, CliffordT) in that order.
pub fn scaling_report() -> Vec<ScalingInfo> {
    // Small constructor closure so each table row reads as one entry.
    let entry = |backend, exact, approx, time: &str, space: &str| ScalingInfo {
        backend,
        max_qubits_exact: exact,
        max_qubits_approximate: approx,
        time_complexity: time.into(),
        space_complexity: space.into(),
    };
    vec![
        entry(
            BackendType::StateVector,
            32,
            36,
            "O(2^n * gates)",
            "O(2^n)",
        ),
        entry(
            BackendType::Stabilizer,
            10_000_000,
            10_000_000,
            "O(n^2 * gates) for Clifford",
            "O(n^2)",
        ),
        entry(
            BackendType::TensorNetwork,
            100,
            10_000,
            "O(n * chi^3 * gates)",
            "O(n * chi^2)",
        ),
        entry(
            BackendType::CliffordT,
            1000,
            10_000,
            "O(2^t * n^2 * gates) for t T-gates",
            "O(2^t * n^2)",
        ),
    ]
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::circuit::QuantumCircuit;

    // A 50-qubit H layer + CNOT chain is pure Clifford, so the analyzer
    // must route to the stabilizer backend with high confidence (Rule 1).
    #[test]
    fn pure_clifford_selects_stabilizer() {
        let mut circ = QuantumCircuit::new(50);
        for q in 0..50 {
            circ.h(q);
        }
        for q in 0..49 {
            circ.cnot(q, q + 1);
        }
        let analysis = analyze_circuit(&circ);
        assert_eq!(analysis.recommended_backend, BackendType::Stabilizer);
        assert!(analysis.clifford_fraction >= 1.0);
        assert!(analysis.confidence > 0.9);
    }

    // 5 qubits with a T gate: non-Clifford, but small -> StateVector (Rule 3).
    #[test]
    fn small_circuit_selects_state_vector() {
        let mut circ = QuantumCircuit::new(5);
        circ.h(0).t(1).cnot(0, 1);
        let analysis = analyze_circuit(&circ);
        assert_eq!(analysis.recommended_backend, BackendType::StateVector);
        assert!(analysis.confidence > 0.9);
    }

    // 30 qubits hits the tight-memory band (26..=32) -> StateVector at
    // reduced confidence (Rule 4).
    #[test]
    fn medium_circuit_selects_state_vector() {
        let mut circ = QuantumCircuit::new(30);
        circ.h(0).rx(1, 1.0).cnot(0, 1);
        let analysis = analyze_circuit(&circ);
        assert_eq!(analysis.recommended_backend, BackendType::StateVector);
        assert!(analysis.confidence >= 0.80);
    }

    #[test]
    fn large_nearest_neighbor_selects_tensor_network() {
        let mut circ = QuantumCircuit::new(64);
        // Low depth, nearest-neighbor only.
        for q in 0..63 {
            circ.cnot(q, q + 1);
        }
        // Add enough non-Clifford gates to avoid the "mostly Clifford" Rule 2
        // (which requires non_clifford_gates <= 10).
        for q in 0..12 {
            circ.t(q);
        }
        let analysis = analyze_circuit(&circ);
        assert_eq!(analysis.recommended_backend, BackendType::TensorNetwork);
    }

    // No gates at all: clifford_fraction defaults to 1.0 by definition.
    #[test]
    fn empty_circuit_defaults() {
        let circ = QuantumCircuit::new(10);
        let analysis = analyze_circuit(&circ);
        // Empty circuit is "pure Clifford" (no non-Clifford gates).
        assert_eq!(analysis.total_gates, 0);
        assert!(analysis.clifford_fraction >= 1.0);
    }

    // Measurements are tallied separately from unitary gates.
    #[test]
    fn measurement_counted() {
        let mut circ = QuantumCircuit::new(3);
        circ.h(0).measure(0).measure(1).measure(2);
        let analysis = analyze_circuit(&circ);
        assert_eq!(analysis.measurement_gates, 3);
    }

    // A CNOT spanning non-adjacent qubits sets max_connectivity and clears
    // the nearest-neighbor flag.
    #[test]
    fn connectivity_detected() {
        let mut circ = QuantumCircuit::new(10);
        circ.cnot(0, 5); // distance = 5
        let analysis = analyze_circuit(&circ);
        assert_eq!(analysis.max_connectivity, 5);
        assert!(!analysis.is_nearest_neighbor);
    }

    // The report order is part of the documented contract.
    #[test]
    fn scaling_report_has_four_entries() {
        let report = scaling_report();
        assert_eq!(report.len(), 4);
        assert_eq!(report[0].backend, BackendType::StateVector);
        assert_eq!(report[1].backend, BackendType::Stabilizer);
        assert_eq!(report[2].backend, BackendType::TensorNetwork);
        assert_eq!(report[3].backend, BackendType::CliffordT);
    }

    #[test]
    fn format_memory_values() {
        // 10 qubits => 2^10 * 16 = 16384 bytes
        assert_eq!(format_memory(10), "16384 bytes");
        // 20 qubits => 2^20 * 16 = 16 MiB
        assert_eq!(format_memory(20), "16.0 MiB");
        // 30 qubits => 2^30 * 16 = 16 GiB
        assert_eq!(format_memory(30), "16.0 GiB");
    }
}

View File

@@ -0,0 +1,806 @@
//! Comprehensive benchmark and proof suite for ruqu-core's four flagship
//! capabilities: cost-model routing, entanglement budgeting, adaptive
//! decoding, and cross-backend certification.
//!
//! All benchmarks are deterministic (seeded RNG) and self-contained,
//! using only `rand` and `std` beyond crate-internal imports.
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use std::time::Instant;
use crate::backend::{analyze_circuit, BackendType};
use crate::circuit::QuantumCircuit;
use crate::confidence::total_variation_distance;
use crate::decoder::{
PartitionedDecoder, StabilizerMeasurement, SurfaceCodeDecoder, SyndromeData, UnionFindDecoder,
};
use crate::decomposition::{classify_segment, decompose, estimate_segment_cost};
use crate::planner::{plan_execution, PlannerConfig};
use crate::simulator::Simulator;
use crate::verification::{is_clifford_circuit, run_stabilizer_shots};
// ---------------------------------------------------------------------------
// Proof 1: Routing benchmark
// ---------------------------------------------------------------------------
/// Result for a single circuit's routing comparison.
pub struct RoutingResult {
    /// Index of the circuit within the benchmark run.
    pub circuit_id: usize,
    /// Qubit count of the generated circuit.
    pub num_qubits: u32,
    /// Circuit depth.
    pub depth: u32,
    /// Number of non-Clifford gates (proxy for T-count).
    pub t_count: u32,
    /// Predicted runtime for the naive selector's choice (always StateVector).
    pub naive_time_ns: u64,
    /// Predicted runtime for the Clifford-fraction heuristic's choice.
    pub heuristic_time_ns: u64,
    /// Predicted runtime for the cost-model planner's choice.
    pub planner_time_ns: u64,
    /// Debug-formatted name of the backend the planner picked.
    pub planner_backend: String,
    /// naive_time_ns / planner_time_ns (1.0 when planner time is zero).
    pub speedup_vs_naive: f64,
    /// heuristic_time_ns / planner_time_ns (1.0 when planner time is zero).
    pub speedup_vs_heuristic: f64,
}
/// Aggregated routing benchmark across many circuits.
pub struct RoutingBenchmark {
    /// Number of circuits requested for the run.
    pub num_circuits: usize,
    /// One entry per benchmarked circuit.
    pub results: Vec<RoutingResult>,
}
impl RoutingBenchmark {
    /// Percentage of circuits where the cost-model planner matches or beats
    /// the naive selector on predicted runtime.
    ///
    /// Returns `0.0` when there are no results.
    pub fn planner_win_rate_vs_naive(&self) -> f64 {
        if self.results.is_empty() {
            return 0.0;
        }
        let wins = self
            .results
            .iter()
            .filter(|r| r.planner_time_ns <= r.naive_time_ns)
            .count();
        wins as f64 / self.results.len() as f64 * 100.0
    }
    /// Median speedup of planner vs naive (upper median for even counts).
    ///
    /// Returns `1.0` (no speedup) when there are no results.
    pub fn median_speedup_vs_naive(&self) -> f64 {
        if self.results.is_empty() {
            return 1.0;
        }
        let mut speedups: Vec<f64> = self.results.iter().map(|r| r.speedup_vs_naive).collect();
        // Selecting the single order statistic is O(n) instead of a full
        // O(n log n) sort; `total_cmp` gives a total order over f64, so a
        // stray NaN cannot panic the way `partial_cmp(..).unwrap()` would.
        let mid = speedups.len() / 2;
        let (_, median, _) = speedups.select_nth_unstable_by(mid, f64::total_cmp);
        *median
    }
}
/// Simulate the predicted runtime (nanoseconds) for a circuit on a specific
/// backend, using the planner's cost model.
fn predicted_runtime_ns(circuit: &QuantumCircuit, backend: BackendType) -> u64 {
    let analysis = analyze_circuit(circuit);
    let n = analysis.num_qubits;
    let nf = n as f64;
    let gate_count = analysis.total_gates as f64;
    match backend {
        // O(n^2) per gate for the stabilizer tableau.
        BackendType::Stabilizer => (nf * nf * gate_count * 0.1) as u64,
        BackendType::StateVector => {
            if n >= 64 {
                // 2^64 amplitudes cannot even be indexed; treat as infinite.
                u64::MAX
            } else {
                let amplitudes = (1u64 << n) as f64;
                let base = amplitudes * gate_count * 4.0;
                // Extra penalty past the comfortable ~25-qubit range.
                let scaling = if n > 25 {
                    2.0_f64.powi((n - 25) as i32)
                } else {
                    1.0
                };
                (base * scaling) as u64
            }
        }
        BackendType::TensorNetwork => {
            // Fixed bond dimension chi = 64; cost ~ n * chi^3 per gate.
            let chi = 64.0_f64;
            (nf * chi * chi * chi * gate_count * 2.0) as u64
        }
        BackendType::CliffordT => {
            // 2^t stabiliser terms, each O(n^2) per gate.
            let t = analysis.non_clifford_gates as u32;
            let terms = 1u64.checked_shl(t).unwrap_or(u64::MAX);
            let flops_per_gate = 4 * (n as u64) * (n as u64);
            (terms as f64 * flops_per_gate as f64 * gate_count * 0.1) as u64
        }
        BackendType::Auto => {
            // Resolve Auto via the planner, then score the concrete choice.
            let plan = plan_execution(circuit, &PlannerConfig::default());
            predicted_runtime_ns(circuit, plan.backend)
        }
    }
}
/// Naive selector: always picks StateVector.
fn naive_select(_circuit: &QuantumCircuit) -> BackendType {
    // Baseline strategy: the circuit is ignored entirely.
    BackendType::StateVector
}
/// Simple heuristic: Clifford fraction > 0.95 => Stabilizer, else StateVector.
fn heuristic_select(circuit: &QuantumCircuit) -> BackendType {
    // Thresholds on the Clifford fraction alone; ignores size and depth.
    match analyze_circuit(circuit).clifford_fraction {
        f if f > 0.95 => BackendType::Stabilizer,
        _ => BackendType::StateVector,
    }
}
/// Run the routing benchmark: generate diverse circuits, route through
/// three selectors, and compare predicted runtimes.
///
/// `seed` makes the run deterministic; `num_circuits` controls how many
/// circuits are generated. Circuits cycle round-robin through five
/// generator kinds (Clifford, low-T, high-T, large nearest-neighbor,
/// mixed) so each selector sees a representative workload mix.
pub fn run_routing_benchmark(seed: u64, num_circuits: usize) -> RoutingBenchmark {
    let mut rng = StdRng::seed_from_u64(seed);
    let config = PlannerConfig::default();
    let mut results = Vec::with_capacity(num_circuits);
    for id in 0..num_circuits {
        // Round-robin over the five circuit generators.
        let kind = id % 5;
        let circuit = match kind {
            0 => gen_clifford_circuit(&mut rng),
            1 => gen_low_t_circuit(&mut rng),
            2 => gen_high_t_circuit(&mut rng),
            3 => gen_large_nn_circuit(&mut rng),
            _ => gen_mixed_circuit(&mut rng),
        };
        let analysis = analyze_circuit(&circuit);
        let t_count = analysis.non_clifford_gates as u32;
        let depth = circuit.depth();
        let num_qubits = circuit.num_qubits();
        // Route the same circuit through all three selectors.
        let plan = plan_execution(&circuit, &config);
        let planner_backend = plan.backend;
        let naive_backend = naive_select(&circuit);
        let heuristic_backend = heuristic_select(&circuit);
        // Score every choice with the shared cost model (predicted ns).
        let planner_time = predicted_runtime_ns(&circuit, planner_backend);
        let naive_time = predicted_runtime_ns(&circuit, naive_backend);
        let heuristic_time = predicted_runtime_ns(&circuit, heuristic_backend);
        // Guard against division by zero: a zero-cost plan reports 1.0x.
        let speedup_naive = if planner_time > 0 {
            naive_time as f64 / planner_time as f64
        } else {
            1.0
        };
        let speedup_heuristic = if planner_time > 0 {
            heuristic_time as f64 / planner_time as f64
        } else {
            1.0
        };
        results.push(RoutingResult {
            circuit_id: id,
            num_qubits,
            depth,
            t_count,
            naive_time_ns: naive_time,
            heuristic_time_ns: heuristic_time,
            planner_time_ns: planner_time,
            planner_backend: format!("{:?}", planner_backend),
            speedup_vs_naive: speedup_naive,
            speedup_vs_heuristic: speedup_heuristic,
        });
    }
    RoutingBenchmark {
        num_circuits,
        results,
    }
}
// ---------------------------------------------------------------------------
// Circuit generators (kept minimal to stay under 500 lines)
// ---------------------------------------------------------------------------
/// Generate a random pure-Clifford circuit: an H layer followed by random
/// CNOTs on a ring topology (2..=60 qubits).
fn gen_clifford_circuit(rng: &mut StdRng) -> QuantumCircuit {
    let width = rng.gen_range(2..=60);
    let mut circuit = QuantumCircuit::new(width);
    (0..width).for_each(|q| {
        circuit.h(q);
    });
    let cnot_count = rng.gen_range(width..width * 3);
    for _ in 0..cnot_count {
        let control = rng.gen_range(0..width);
        // Ring topology: the last qubit wraps around to qubit 0.
        circuit.cnot(control, (control + 1) % width);
    }
    circuit
}
/// Generate a GHZ-like circuit (H layer + CNOT chain) with only 1-3 T gates.
fn gen_low_t_circuit(rng: &mut StdRng) -> QuantumCircuit {
    let width = rng.gen_range(4..=20);
    let mut circuit = QuantumCircuit::new(width);
    for q in 0..width {
        circuit.h(q);
    }
    for q in 0..width - 1 {
        circuit.cnot(q, q + 1);
    }
    // Sprinkle a small number of T gates at random positions.
    for _ in 0..rng.gen_range(1..=3) {
        circuit.t(rng.gen_range(0..width));
    }
    circuit
}
/// Generate a T-heavy circuit: each layer flips a coin per qubit between T
/// and H, then applies one random nearest-neighbor CNOT.
fn gen_high_t_circuit(rng: &mut StdRng) -> QuantumCircuit {
    let width = rng.gen_range(3..=15);
    let mut circuit = QuantumCircuit::new(width);
    let layers = rng.gen_range(5..20);
    for _ in 0..layers {
        for q in 0..width {
            match rng.gen_bool(0.5) {
                true => circuit.t(q),
                false => circuit.h(q),
            };
        }
        if width > 1 {
            let control = rng.gen_range(0..width - 1);
            circuit.cnot(control, control + 1);
        }
    }
    circuit
}
/// Generate a wide (40..=100 qubit) nearest-neighbor CNOT chain with
/// 15..30 random T gates, sized to dodge the "mostly Clifford" rule.
fn gen_large_nn_circuit(rng: &mut StdRng) -> QuantumCircuit {
    let width = rng.gen_range(40..=100);
    let mut circuit = QuantumCircuit::new(width);
    (0..width - 1).for_each(|q| {
        circuit.cnot(q, q + 1);
    });
    let t_gates = rng.gen_range(15..30);
    for _ in 0..t_gates {
        circuit.t(rng.gen_range(0..width));
    }
    circuit
}
/// Generate a mixed circuit: layered random single-qubit gates (H/T/S/X)
/// plus one random nearest-neighbor CNOT per layer.
fn gen_mixed_circuit(rng: &mut StdRng) -> QuantumCircuit {
    let width = rng.gen_range(5..=25);
    let mut circuit = QuantumCircuit::new(width);
    let layers = rng.gen_range(3..10);
    for _ in 0..layers {
        for q in 0..width {
            // Uniform pick among four single-qubit gates.
            let pick = rng.gen_range(0..4);
            if pick == 0 {
                circuit.h(q);
            } else if pick == 1 {
                circuit.t(q);
            } else if pick == 2 {
                circuit.s(q);
            } else {
                circuit.x(q);
            }
        }
        if width > 1 {
            let control = rng.gen_range(0..width - 1);
            circuit.cnot(control, control + 1);
        }
    }
    circuit
}
// ---------------------------------------------------------------------------
// Proof 2: Entanglement budget benchmark
// ---------------------------------------------------------------------------
/// Results from the entanglement budget verification.
pub struct EntanglementBudgetBenchmark {
    /// Number of circuits generated and decomposed.
    pub circuits_tested: usize,
    /// Total segments produced across all decompositions.
    pub segments_total: usize,
    /// Segments whose qubit count stayed within the budget.
    pub segments_within_budget: usize,
    /// Worst relative overshoot of the budget (0.0 if none violated).
    pub max_violation: f64,
    /// Relative cost increase of decomposed vs whole-circuit execution,
    /// in percent, clamped at 0.
    pub decomposition_overhead_pct: f64,
}
/// Run the entanglement budget benchmark: decompose circuits into segments
/// and verify each segment's estimated entanglement stays within budget.
///
/// `seed` fixes the RNG; `num_circuits` circuits are generated. For each,
/// the whole-circuit ("baseline") cost is compared against the summed cost
/// of its decomposed segments to estimate the decomposition overhead.
pub fn run_entanglement_benchmark(seed: u64, num_circuits: usize) -> EntanglementBudgetBenchmark {
    let mut rng = StdRng::seed_from_u64(seed);
    let mut segments_total = 0usize;
    let mut segments_within = 0usize;
    let mut max_violation = 0.0_f64;
    // Budget: no segment may span more qubits than this.
    let max_segment_qubits = 25;
    let mut baseline_cost = 0u64;
    let mut decomposed_cost = 0u64;
    for _ in 0..num_circuits {
        let circuit = gen_entanglement_circuit(&mut rng);
        // Baseline cost: whole circuit on a single backend.
        let base_backend = classify_segment(&circuit);
        let base_seg = estimate_segment_cost(&circuit, base_backend);
        baseline_cost += base_seg.estimated_flops;
        // Decomposed cost: sum of segment costs.
        let partition = decompose(&circuit, max_segment_qubits);
        for seg in &partition.segments {
            segments_total += 1;
            decomposed_cost += seg.estimated_cost.estimated_flops;
            // Check entanglement budget: the segment qubit count should
            // not exceed the max_segment_qubits threshold.
            let active = seg.circuit.num_qubits();
            if active <= max_segment_qubits {
                segments_within += 1;
            } else {
                // Track the worst relative overshoot seen so far.
                let violation = (active - max_segment_qubits) as f64 / max_segment_qubits as f64;
                if violation > max_violation {
                    max_violation = violation;
                }
            }
        }
    }
    // Relative cost increase from decomposing; negative values (decomposed
    // cheaper than baseline) are clamped to 0% below.
    let overhead = if baseline_cost > 0 {
        ((decomposed_cost as f64 / baseline_cost as f64) - 1.0) * 100.0
    } else {
        0.0
    };
    EntanglementBudgetBenchmark {
        circuits_tested: num_circuits,
        segments_total,
        segments_within_budget: segments_within,
        max_violation,
        decomposition_overhead_pct: overhead.max(0.0),
    }
}
/// Generate a circuit of two independently entangled blocks with an
/// optional bridge CNOT between them, plus up to four T gates.
fn gen_entanglement_circuit(rng: &mut StdRng) -> QuantumCircuit {
    let width = rng.gen_range(6..=40);
    let mut circuit = QuantumCircuit::new(width);
    let half = width / 2;
    // Lower block: H + chain CNOTs on qubits [0, half).
    for q in 0..half.saturating_sub(1) {
        circuit.h(q);
        circuit.cnot(q, q + 1);
    }
    // Upper block: H + chain CNOTs on qubits [half, width).
    for q in half..width - 1 {
        circuit.h(q);
        circuit.cnot(q, q + 1);
    }
    // Occasional bridge gate (the coin is flipped unconditionally so the
    // RNG stream does not depend on `half`).
    let bridge = rng.gen_bool(0.3);
    if bridge && half > 0 && half < width {
        circuit.cnot(half - 1, half);
    }
    // Sprinkle some T gates.
    for _ in 0..rng.gen_range(0..5) {
        circuit.t(rng.gen_range(0..width));
    }
    circuit
}
// ---------------------------------------------------------------------------
// Proof 3: Decoder benchmark
// ---------------------------------------------------------------------------
/// Result for a single code distance's decoder comparison.
pub struct DecoderBenchmarkResult {
    /// Surface code distance benchmarked.
    pub distance: u32,
    /// Mean decode latency of the plain union-find decoder, in ns.
    pub union_find_avg_ns: f64,
    /// Mean decode latency of the partitioned (tiled) decoder, in ns.
    pub partitioned_avg_ns: f64,
    /// union_find_avg_ns / partitioned_avg_ns (1.0 if the latter is zero).
    pub speedup: f64,
    /// Fraction of rounds where union-find matched the expected logical outcome.
    pub union_find_accuracy: f64,
    /// Fraction of rounds where the partitioned decoder matched it.
    pub partitioned_accuracy: f64,
}
/// Run the decoder benchmark across multiple code distances.
///
/// For each distance in `distances`, decodes `rounds_per_distance` random
/// syndromes with both a plain union-find decoder and a partitioned
/// (tiled) decoder, recording average latency and a rough accuracy proxy.
pub fn run_decoder_benchmark(
    seed: u64,
    distances: &[u32],
    rounds_per_distance: u32,
) -> Vec<DecoderBenchmarkResult> {
    let mut rng = StdRng::seed_from_u64(seed);
    // Independent firing probability per stabilizer in gen_syndrome.
    let error_rate = 0.05;
    let mut results = Vec::with_capacity(distances.len());
    for &d in distances {
        let uf_decoder = UnionFindDecoder::new(0);
        // Tile edge for the partitioned decoder: half the distance, min 2.
        let tile_size = (d / 2).max(2);
        let part_decoder = PartitionedDecoder::new(tile_size, Box::new(UnionFindDecoder::new(0)));
        let mut uf_total_ns = 0u64;
        let mut part_total_ns = 0u64;
        let mut uf_correct = 0u64;
        let mut part_correct = 0u64;
        for _ in 0..rounds_per_distance {
            // Both decoders see the exact same syndrome.
            let syndrome = gen_syndrome(&mut rng, d, error_rate);
            let uf_corr = uf_decoder.decode(&syndrome);
            uf_total_ns += uf_corr.decode_time_ns;
            let part_corr = part_decoder.decode(&syndrome);
            part_total_ns += part_corr.decode_time_ns;
            // A simple accuracy check: count defects and compare logical
            // outcome expectation.
            let defect_count = syndrome.stabilizers.iter().filter(|s| s.value).count();
            let expected_logical = defect_count >= d as usize;
            if uf_corr.logical_outcome == expected_logical {
                uf_correct += 1;
            }
            if part_corr.logical_outcome == expected_logical {
                part_correct += 1;
            }
        }
        let r = rounds_per_distance as f64;
        let uf_avg = uf_total_ns as f64 / r;
        let part_avg = part_total_ns as f64 / r;
        // Speedup of the partitioned decoder over plain union-find; a
        // zero average latency reports a neutral 1.0x.
        let speedup = if part_avg > 0.0 {
            uf_avg / part_avg
        } else {
            1.0
        };
        results.push(DecoderBenchmarkResult {
            distance: d,
            union_find_avg_ns: uf_avg,
            partitioned_avg_ns: part_avg,
            speedup,
            union_find_accuracy: uf_correct as f64 / r,
            partitioned_accuracy: part_correct as f64 / r,
        });
    }
    results
}
/// Build a single-round random syndrome: a (d-1) x (d-1) stabilizer grid
/// (minimum 1 x 1) where each stabilizer fires independently with
/// probability `error_rate`.
fn gen_syndrome(rng: &mut StdRng, distance: u32, error_rate: f64) -> SyndromeData {
    // Equivalent to `if distance > 1 { distance - 1 } else { 1 }`.
    let side = distance.max(2) - 1;
    let mut cells = Vec::with_capacity((side * side) as usize);
    for y in 0..side {
        for x in 0..side {
            let fired = rng.gen_bool(error_rate);
            cells.push(StabilizerMeasurement {
                x,
                y,
                round: 0,
                value: fired,
            });
        }
    }
    SyndromeData {
        stabilizers: cells,
        code_distance: distance,
        num_rounds: 1,
    }
}
// ---------------------------------------------------------------------------
// Proof 4: Cross-backend certification
// ---------------------------------------------------------------------------
/// Results from the cross-backend certification benchmark.
pub struct CertificationBenchmark {
    /// Circuits actually compared (non-Clifford / too-wide ones are skipped).
    pub circuits_tested: usize,
    /// Circuits whose TVD stayed at or below `tvd_bound`.
    pub certified: usize,
    /// certified / circuits_tested (0.0 when none were tested).
    pub certification_rate: f64,
    /// Largest total variation distance observed.
    pub max_tvd: f64,
    /// Mean total variation distance over tested circuits.
    pub avg_tvd: f64,
    /// Threshold a circuit must stay within to count as certified.
    pub tvd_bound: f64,
}
/// Run the certification benchmark: compare Clifford circuits across
/// state-vector and stabilizer backends, measuring TVD.
///
/// Generated circuits that are non-Clifford or wider than 20 qubits are
/// skipped, so `circuits_tested` in the result may be less than
/// `num_circuits`.
pub fn run_certification_benchmark(
    seed: u64,
    num_circuits: usize,
    shots: u32,
) -> CertificationBenchmark {
    let mut rng = StdRng::seed_from_u64(seed);
    // A circuit is "certified" when its TVD stays at or below this bound.
    let tvd_bound = 0.15;
    let mut certified = 0usize;
    let mut max_tvd = 0.0_f64;
    let mut tvd_sum = 0.0_f64;
    let mut tested = 0usize;
    for i in 0..num_circuits {
        let circuit = gen_certifiable_circuit(&mut rng);
        // Only Clifford circuits can run on the stabilizer side, and wide
        // circuits are too expensive for the state-vector comparison.
        if !is_clifford_circuit(&circuit) || circuit.num_qubits() > 20 {
            continue;
        }
        tested += 1;
        // Distinct, deterministic shot seed per circuit.
        let shot_seed = seed.wrapping_add(i as u64 * 9973);
        // Run on state-vector backend.
        let sv_result = Simulator::run_shots(&circuit, shots, Some(shot_seed));
        let sv_counts = match sv_result {
            Ok(r) => r.counts,
            // NOTE(review): `tested` was already incremented, so a failed
            // simulation still dilutes avg_tvd / certification_rate —
            // confirm this is intended.
            Err(_) => continue,
        };
        // Run on stabilizer backend.
        let stab_counts = run_stabilizer_shots(&circuit, shots, shot_seed);
        // Compute TVD.
        let tvd = total_variation_distance(&sv_counts, &stab_counts);
        tvd_sum += tvd;
        if tvd > max_tvd {
            max_tvd = tvd;
        }
        if tvd <= tvd_bound {
            certified += 1;
        }
    }
    let avg_tvd = if tested > 0 {
        tvd_sum / tested as f64
    } else {
        0.0
    };
    let cert_rate = if tested > 0 {
        certified as f64 / tested as f64
    } else {
        0.0
    };
    CertificationBenchmark {
        circuits_tested: tested,
        certified,
        certification_rate: cert_rate,
        max_tvd,
        avg_tvd,
        tvd_bound,
    }
}
/// Generate a small Clifford circuit suitable for cross-backend
/// certification: a GHZ backbone, random extra single-qubit Cliffords,
/// and a terminal measurement on every qubit.
fn gen_certifiable_circuit(rng: &mut StdRng) -> QuantumCircuit {
    let width = rng.gen_range(2..=10);
    let mut circuit = QuantumCircuit::new(width);
    // GHZ backbone: H on qubit 0 followed by a CNOT chain.
    circuit.h(0);
    for q in 0..width - 1 {
        circuit.cnot(q, q + 1);
    }
    // Random extra single-qubit Clifford gates.
    let extras = rng.gen_range(0..width * 2);
    for _ in 0..extras {
        let target = rng.gen_range(0..width);
        match rng.gen_range(0..4) {
            0 => circuit.h(target),
            1 => circuit.s(target),
            2 => circuit.x(target),
            _ => circuit.z(target),
        };
    }
    // Measure every qubit so shot statistics are comparable.
    for q in 0..width {
        circuit.measure(q);
    }
    circuit
}
// ---------------------------------------------------------------------------
// Master benchmark runner
// ---------------------------------------------------------------------------
/// Aggregated report from all four proof-point benchmarks.
pub struct FullBenchmarkReport {
    /// Proof 1: cost-model routing comparison.
    pub routing: RoutingBenchmark,
    /// Proof 2: entanglement budgeting verification.
    pub entanglement: EntanglementBudgetBenchmark,
    /// Proof 3: decoder latency per code distance.
    pub decoder: Vec<DecoderBenchmarkResult>,
    /// Proof 4: cross-backend certification results.
    pub certification: CertificationBenchmark,
    /// Wall-clock time for the whole suite, in milliseconds.
    pub total_time_ms: u64,
}
/// Run all four benchmarks with a single seed for reproducibility.
///
/// Each sub-benchmark receives a distinct `seed + k` so the RNG streams
/// are independent yet fully determined by `seed`. Wall-clock time for
/// the whole suite is recorded in `total_time_ms`.
pub fn run_full_benchmark(seed: u64) -> FullBenchmarkReport {
    let start = Instant::now();
    // Proof 1: cost-model routing over 1000 generated circuits.
    let routing = run_routing_benchmark(seed, 1000);
    // Proof 2: entanglement budgeting over 200 circuits.
    let entanglement = run_entanglement_benchmark(seed.wrapping_add(1), 200);
    // Proof 3: decoder latency across a sweep of code distances.
    let decoder = run_decoder_benchmark(
        seed.wrapping_add(2),
        &[3, 5, 7, 9, 11, 13, 15, 17, 21, 25],
        100,
    );
    // Proof 4: cross-backend certification, 100 circuits x 500 shots.
    let certification = run_certification_benchmark(seed.wrapping_add(3), 100, 500);
    let total_time_ms = start.elapsed().as_millis() as u64;
    FullBenchmarkReport {
        routing,
        entanglement,
        decoder,
        certification,
        total_time_ms,
    }
}
/// Format a full benchmark report as a human-readable text summary.
///
/// Produces one section per proof point plus a total-time footer; purely
/// a presentation function with no side effects.
pub fn format_report(report: &FullBenchmarkReport) -> String {
    let mut out = String::with_capacity(2048);
    out.push_str("=== ruqu-core Full Benchmark Report ===\n\n");
    // -- Routing --
    out.push_str("--- Proof 1: Cost-Model Routing ---\n");
    out.push_str(&format!(
        "  Circuits tested: {}\n",
        report.routing.num_circuits
    ));
    out.push_str(&format!(
        "  Planner win rate vs naive: {:.1}%\n",
        report.routing.planner_win_rate_vs_naive()
    ));
    out.push_str(&format!(
        "  Median speedup vs naive: {:.2}x\n",
        report.routing.median_speedup_vs_naive()
    ));
    // Median speedup vs the heuristic selector is computed inline here
    // (there is no accessor for it on RoutingBenchmark).
    let mut heuristic_speedups: Vec<f64> = report
        .routing
        .results
        .iter()
        .map(|r| r.speedup_vs_heuristic)
        .collect();
    heuristic_speedups.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let median_h = if heuristic_speedups.is_empty() {
        1.0
    } else {
        heuristic_speedups[heuristic_speedups.len() / 2]
    };
    out.push_str(&format!(
        "  Median speedup vs heuristic: {:.2}x\n\n",
        median_h
    ));
    // -- Entanglement --
    out.push_str("--- Proof 2: Entanglement Budgeting ---\n");
    let eb = &report.entanglement;
    out.push_str(&format!("  Circuits tested: {}\n", eb.circuits_tested));
    out.push_str(&format!("  Total segments: {}\n", eb.segments_total));
    out.push_str(&format!(
        "  Within budget: {} ({:.1}%)\n",
        eb.segments_within_budget,
        if eb.segments_total > 0 {
            eb.segments_within_budget as f64 / eb.segments_total as f64 * 100.0
        } else {
            0.0
        }
    ));
    out.push_str(&format!(
        "  Max violation: {:.2}%\n",
        eb.max_violation * 100.0
    ));
    out.push_str(&format!(
        "  Decomposition overhead: {:.1}%\n\n",
        eb.decomposition_overhead_pct
    ));
    // -- Decoder --
    out.push_str("--- Proof 3: Adaptive Decoder Latency ---\n");
    out.push_str("  distance | UF avg (ns) | Part avg (ns) | speedup | UF acc | Part acc\n");
    out.push_str("  ---------+-------------+---------------+---------+---------+---------\n");
    for d in &report.decoder {
        out.push_str(&format!(
            "  {:>7} | {:>11.0} | {:>13.0} | {:>6.2}x | {:>6.1}% | {:>6.1}%\n",
            d.distance,
            d.union_find_avg_ns,
            d.partitioned_avg_ns,
            d.speedup,
            d.union_find_accuracy * 100.0,
            d.partitioned_accuracy * 100.0,
        ));
    }
    out.push('\n');
    // -- Certification --
    out.push_str("--- Proof 4: Cross-Backend Certification ---\n");
    let c = &report.certification;
    out.push_str(&format!("  Circuits tested: {}\n", c.circuits_tested));
    out.push_str(&format!("  Certified: {}\n", c.certified));
    out.push_str(&format!(
        "  Certification rate: {:.1}%\n",
        c.certification_rate * 100.0
    ));
    out.push_str(&format!("  Max TVD observed: {:.6}\n", c.max_tvd));
    out.push_str(&format!("  Avg TVD: {:.6}\n", c.avg_tvd));
    out.push_str(&format!("  TVD bound: {:.6}\n\n", c.tvd_bound));
    // -- Summary --
    out.push_str(&format!(
        "Total benchmark time: {} ms\n",
        report.total_time_ms
    ));
    out
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: one result row per requested circuit, and the planner
    // should beat the naive baseline on at least some circuits.
    #[test]
    fn test_routing_benchmark_runs() {
        let bench = run_routing_benchmark(42, 50);
        assert_eq!(bench.num_circuits, 50);
        assert_eq!(bench.results.len(), 50);
        assert!(bench.planner_win_rate_vs_naive() > 0.0);
    }
    // The entanglement benchmark must analyse every circuit and produce at
    // least one segment in total.
    #[test]
    fn test_entanglement_benchmark_runs() {
        let bench = run_entanglement_benchmark(42, 20);
        assert_eq!(bench.circuits_tested, 20);
        assert!(bench.segments_total > 0);
    }
    // One result per code distance; averaged timings can never be negative.
    #[test]
    fn test_decoder_benchmark_runs() {
        let results = run_decoder_benchmark(42, &[3, 5, 7], 10);
        assert_eq!(results.len(), 3);
        for r in &results {
            assert!(r.union_find_avg_ns >= 0.0);
            assert!(r.partitioned_avg_ns >= 0.0);
        }
    }
    // The certification rate is a probability and must stay in [0, 1].
    #[test]
    fn test_certification_benchmark_runs() {
        let bench = run_certification_benchmark(42, 10, 100);
        assert!(bench.circuits_tested > 0);
        assert!(bench.certification_rate >= 0.0);
        assert!(bench.certification_rate <= 1.0);
    }
    // The rendered report must mention all four proof sections and the
    // total-time footer (matches the section headers in format_report).
    #[test]
    fn test_format_report_nonempty() {
        let report = FullBenchmarkReport {
            routing: run_routing_benchmark(0, 10),
            entanglement: run_entanglement_benchmark(0, 5),
            decoder: run_decoder_benchmark(0, &[3, 5], 5),
            certification: run_certification_benchmark(0, 5, 50),
            total_time_ms: 42,
        };
        let text = format_report(&report);
        assert!(text.contains("Proof 1"));
        assert!(text.contains("Proof 2"));
        assert!(text.contains("Proof 3"));
        assert!(text.contains("Proof 4"));
        assert!(text.contains("Total benchmark time"));
    }
    #[test]
    fn test_routing_speedup_for_clifford() {
        // Pure Clifford circuit: planner should choose Stabilizer,
        // which is faster than naive StateVector.
        let mut circ = QuantumCircuit::new(50);
        for q in 0..50 {
            circ.h(q);
        }
        for q in 0..49 {
            circ.cnot(q, q + 1);
        }
        let plan = plan_execution(&circ, &PlannerConfig::default());
        assert_eq!(plan.backend, BackendType::Stabilizer);
        let planner_ns = predicted_runtime_ns(&circ, plan.backend);
        let naive_ns = predicted_runtime_ns(&circ, BackendType::StateVector);
        assert!(
            planner_ns < naive_ns,
            "Stabilizer should be faster than SV for 50-qubit Clifford"
        );
    }
}

View File

@@ -0,0 +1,185 @@
//! Quantum circuit: a fluent builder for ordered gate sequences
use crate::gate::Gate;
use crate::types::QubitIndex;
/// A quantum circuit consisting of an ordered sequence of gates on a qubit register.
#[derive(Debug, Clone)]
pub struct QuantumCircuit {
    // Gates in program order; backends interpret this list sequentially.
    gates: Vec<Gate>,
    // Size of the qubit register; gate qubit indices are expected to be
    // smaller than this (out-of-range indices are ignored by `depth()`).
    num_qubits: u32,
}
impl QuantumCircuit {
/// Create a new empty circuit for the given number of qubits.
pub fn new(num_qubits: u32) -> Self {
Self {
gates: Vec::new(),
num_qubits,
}
}
// -------------------------------------------------------------------
// Fluent single-qubit gate methods
// -------------------------------------------------------------------
pub fn h(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::H(q));
self
}
pub fn x(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::X(q));
self
}
pub fn y(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::Y(q));
self
}
pub fn z(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::Z(q));
self
}
pub fn s(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::S(q));
self
}
pub fn t(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::T(q));
self
}
pub fn rx(&mut self, q: QubitIndex, angle: f64) -> &mut Self {
self.gates.push(Gate::Rx(q, angle));
self
}
pub fn ry(&mut self, q: QubitIndex, angle: f64) -> &mut Self {
self.gates.push(Gate::Ry(q, angle));
self
}
pub fn rz(&mut self, q: QubitIndex, angle: f64) -> &mut Self {
self.gates.push(Gate::Rz(q, angle));
self
}
pub fn phase(&mut self, q: QubitIndex, angle: f64) -> &mut Self {
self.gates.push(Gate::Phase(q, angle));
self
}
// -------------------------------------------------------------------
// Fluent two-qubit gate methods
// -------------------------------------------------------------------
pub fn cnot(&mut self, control: QubitIndex, target: QubitIndex) -> &mut Self {
self.gates.push(Gate::CNOT(control, target));
self
}
pub fn cz(&mut self, q1: QubitIndex, q2: QubitIndex) -> &mut Self {
self.gates.push(Gate::CZ(q1, q2));
self
}
pub fn swap(&mut self, q1: QubitIndex, q2: QubitIndex) -> &mut Self {
self.gates.push(Gate::SWAP(q1, q2));
self
}
pub fn rzz(&mut self, q1: QubitIndex, q2: QubitIndex, angle: f64) -> &mut Self {
self.gates.push(Gate::Rzz(q1, q2, angle));
self
}
// -------------------------------------------------------------------
// Special operations
// -------------------------------------------------------------------
pub fn measure(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::Measure(q));
self
}
/// Add a measurement gate to every qubit.
pub fn measure_all(&mut self) -> &mut Self {
for q in 0..self.num_qubits {
self.gates.push(Gate::Measure(q));
}
self
}
pub fn reset(&mut self, q: QubitIndex) -> &mut Self {
self.gates.push(Gate::Reset(q));
self
}
pub fn barrier(&mut self) -> &mut Self {
self.gates.push(Gate::Barrier);
self
}
/// Push an arbitrary gate onto the circuit.
pub fn add_gate(&mut self, gate: Gate) -> &mut Self {
self.gates.push(gate);
self
}
// -------------------------------------------------------------------
// Accessors
// -------------------------------------------------------------------
pub fn gates(&self) -> &[Gate] {
&self.gates
}
pub fn num_qubits(&self) -> u32 {
self.num_qubits
}
pub fn gate_count(&self) -> usize {
self.gates.len()
}
/// Compute the circuit depth: the longest path through the circuit
/// taking qubit dependencies into account.
///
/// A `Barrier` synchronises all qubits to the current maximum depth.
pub fn depth(&self) -> u32 {
let mut qubit_depth = vec![0u32; self.num_qubits as usize];
for gate in &self.gates {
match gate {
Gate::Barrier => {
let max_d = qubit_depth.iter().copied().max().unwrap_or(0);
for d in qubit_depth.iter_mut() {
*d = max_d;
}
}
other => {
let qubits = other.qubits();
if qubits.is_empty() {
continue;
}
let max_d = qubits
.iter()
.map(|&q| qubit_depth.get(q as usize).copied().unwrap_or(0))
.max()
.unwrap_or(0);
for &q in &qubits {
if (q as usize) < qubit_depth.len() {
qubit_depth[q as usize] = max_d + 1;
}
}
}
}
}
qubit_depth.into_iter().max().unwrap_or(0)
}
}

View File

@@ -0,0 +1,446 @@
//! Circuit analysis utilities for simulation backend selection.
//!
//! Provides detailed structural analysis of quantum circuits to enable
//! intelligent routing to the optimal simulation backend. This module
//! complements [`crate::backend`] by exposing lower-level classification
//! and structural queries that advanced users or future optimisation passes
//! may need independently.
use crate::circuit::QuantumCircuit;
use crate::gate::Gate;
use crate::types::QubitIndex;
use std::collections::HashSet;
// ---------------------------------------------------------------------------
// Gate classification
// ---------------------------------------------------------------------------
/// Detailed gate classification for routing decisions.
///
/// Every [`Gate`] variant maps to exactly one `GateClass`, making it easy to
/// partition a circuit by gate type without pattern-matching on every variant.
/// The mapping itself is implemented by [`classify_gate`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GateClass {
    /// Clifford gate (H, S, Sdg, X, Y, Z, CNOT, CZ, SWAP).
    Clifford,
    /// Non-Clifford unitary (T, Tdg, rotations, custom unitary).
    NonClifford,
    /// Measurement operation.
    Measurement,
    /// Reset operation.
    Reset,
    /// Barrier (scheduling hint, no physical effect).
    Barrier,
}
/// Classify a single gate for backend routing.
///
/// # Example
///
/// ```
/// use ruqu_core::gate::Gate;
/// use ruqu_core::circuit_analyzer::{classify_gate, GateClass};
///
/// assert_eq!(classify_gate(&Gate::H(0)), GateClass::Clifford);
/// assert_eq!(classify_gate(&Gate::T(0)), GateClass::NonClifford);
/// assert_eq!(classify_gate(&Gate::Measure(0)), GateClass::Measurement);
/// ```
pub fn classify_gate(gate: &Gate) -> GateClass {
match gate {
Gate::H(_)
| Gate::X(_)
| Gate::Y(_)
| Gate::Z(_)
| Gate::S(_)
| Gate::Sdg(_)
| Gate::CNOT(_, _)
| Gate::CZ(_, _)
| Gate::SWAP(_, _) => GateClass::Clifford,
Gate::T(_)
| Gate::Tdg(_)
| Gate::Rx(_, _)
| Gate::Ry(_, _)
| Gate::Rz(_, _)
| Gate::Phase(_, _)
| Gate::Rzz(_, _, _)
| Gate::Unitary1Q(_, _) => GateClass::NonClifford,
Gate::Measure(_) => GateClass::Measurement,
Gate::Reset(_) => GateClass::Reset,
Gate::Barrier => GateClass::Barrier,
}
}
// ---------------------------------------------------------------------------
// Clifford analysis
// ---------------------------------------------------------------------------
/// Check if a circuit is entirely Clifford-compatible.
///
/// A circuit is Clifford-compatible when every gate is either a Clifford
/// unitary, a measurement, a reset, or a barrier -- equivalently, when no
/// gate classifies as [`GateClass::NonClifford`]. Such circuits can be
/// simulated in polynomial time using the stabilizer formalism.
///
/// # Example
///
/// ```
/// use ruqu_core::circuit::QuantumCircuit;
/// use ruqu_core::circuit_analyzer::is_clifford_circuit;
///
/// let mut circ = QuantumCircuit::new(3);
/// circ.h(0).cnot(0, 1).cnot(1, 2);
/// assert!(is_clifford_circuit(&circ));
///
/// circ.t(0);
/// assert!(!is_clifford_circuit(&circ));
/// ```
pub fn is_clifford_circuit(circuit: &QuantumCircuit) -> bool {
    // NonClifford is the only disqualifying class: measurements, resets and
    // barriers are all handled natively by the stabilizer formalism.
    !circuit
        .gates()
        .iter()
        .any(|g| classify_gate(g) == GateClass::NonClifford)
}
/// Count the number of non-Clifford gates in a circuit.
///
/// This is the primary cost metric for stabilizer-based simulation with
/// magic-state injection: each non-Clifford gate requires exponentially
/// more resources to handle exactly.
pub fn count_non_clifford(circuit: &QuantumCircuit) -> usize {
    let mut total = 0usize;
    for gate in circuit.gates() {
        if matches!(classify_gate(gate), GateClass::NonClifford) {
            total += 1;
        }
    }
    total
}
// ---------------------------------------------------------------------------
// Entanglement and connectivity analysis
// ---------------------------------------------------------------------------
/// Analyze the entanglement structure of a circuit.
///
/// Returns the set of qubit pairs that are directly entangled by at least
/// one two-qubit gate. Pairs are returned with the smaller index first.
///
/// # Example
///
/// ```
/// use ruqu_core::circuit::QuantumCircuit;
/// use ruqu_core::circuit_analyzer::entanglement_pairs;
///
/// let mut circ = QuantumCircuit::new(4);
/// circ.cnot(0, 2).cz(1, 3);
/// let pairs = entanglement_pairs(&circ);
/// assert!(pairs.contains(&(0, 2)));
/// assert!(pairs.contains(&(1, 3)));
/// assert_eq!(pairs.len(), 2);
/// ```
pub fn entanglement_pairs(circuit: &QuantumCircuit) -> HashSet<(QubitIndex, QubitIndex)> {
    circuit
        .gates()
        .iter()
        .filter_map(|gate| match gate.qubits().as_slice() {
            // Normalise each two-qubit interaction to (low, high); the set
            // deduplicates repeated interactions on the same pair.
            &[a, b] => Some((a.min(b), a.max(b))),
            _ => None,
        })
        .collect()
}
/// Check if all two-qubit gates act on nearest-neighbor qubits.
///
/// A circuit with only nearest-neighbor interactions maps efficiently to
/// linear qubit topologies and is a good candidate for Matrix Product State
/// (MPS) tensor-network simulation.
pub fn is_nearest_neighbor(circuit: &QuantumCircuit) -> bool {
    // A single long-range two-qubit gate disqualifies the whole circuit;
    // gates on fewer (or more) than two qubits never do.
    !circuit.gates().iter().any(|gate| {
        let qs = gate.qubits();
        qs.len() == 2 && qs[0].abs_diff(qs[1]) > 1
    })
}
// ---------------------------------------------------------------------------
// Bond dimension estimation
// ---------------------------------------------------------------------------
/// Estimate the maximum bond dimension needed for MPS simulation.
///
/// Scans every possible bipartition of the qubit register (cuts between
/// position `k-1` and `k` for `k` in `1..n`) and counts how many two-qubit
/// gates straddle each cut. The bond dimension grows exponentially with the
/// number of entangling gates across the worst-case cut, capped at 2^20
/// (roughly 1 million) as a practical limit.
///
/// This is a rough *upper bound*; cancellations and limited entanglement
/// growth mean the actual bond dimension required may be much lower.
pub fn estimate_bond_dimension(circuit: &QuantumCircuit) -> usize {
    let n = circuit.num_qubits();
    // Worst case over all bipartition cuts of the number of two-qubit gates
    // whose endpoints straddle that cut. Empty/1-qubit circuits yield 0.
    let worst_cut = (1..n)
        .map(|cut| {
            circuit
                .gates()
                .iter()
                .filter(|gate| {
                    let qs = gate.qubits();
                    if qs.len() != 2 {
                        return false;
                    }
                    let lo = qs[0].min(qs[1]);
                    let hi = qs[0].max(qs[1]);
                    lo < cut && hi >= cut
                })
                .count()
        })
        .max()
        .unwrap_or(0);
    // Bond dimension ~ 2^(crossings); cap the exponent at 20 to keep the
    // estimate in a practical, overflow-safe range.
    2usize.saturating_pow(worst_cut.min(20) as u32)
}
// ---------------------------------------------------------------------------
// Circuit summary
// ---------------------------------------------------------------------------
/// Summary of circuit characteristics for display and diagnostics.
///
/// Produced by [`summarize_circuit`]; all counts are taken over the full
/// gate list, including measurements and barriers where noted.
#[derive(Debug, Clone)]
pub struct CircuitSummary {
    /// Number of qubits in the register.
    pub num_qubits: u32,
    /// Circuit depth (longest qubit timeline).
    pub depth: u32,
    /// Total number of gates (including measurements and barriers).
    pub total_gates: usize,
    /// Number of Clifford gates.
    pub clifford_count: usize,
    /// Number of non-Clifford unitary gates.
    pub non_clifford_count: usize,
    /// Number of measurement gates.
    pub measurement_count: usize,
    /// Whether the circuit contains only Clifford gates (plus measurements/resets).
    pub is_clifford_only: bool,
    /// Whether all two-qubit gates are nearest-neighbor.
    pub is_nearest_neighbor: bool,
    /// Estimated maximum MPS bond dimension (see [`estimate_bond_dimension`]).
    pub estimated_bond_dim: usize,
    /// Human-readable state-vector memory requirement.
    pub state_vector_memory: String,
}
/// Generate a comprehensive summary of a circuit.
///
/// Collects all structural statistics in a single pass and returns them
/// in a [`CircuitSummary`] suitable for logging or display.
///
/// # Example
///
/// ```
/// use ruqu_core::circuit::QuantumCircuit;
/// use ruqu_core::circuit_analyzer::summarize_circuit;
///
/// let mut circ = QuantumCircuit::new(4);
/// circ.h(0).cnot(0, 1).t(2).measure(3);
/// let summary = summarize_circuit(&circ);
/// assert_eq!(summary.num_qubits, 4);
/// assert_eq!(summary.clifford_count, 2);
/// assert_eq!(summary.non_clifford_count, 1);
/// assert_eq!(summary.measurement_count, 1);
/// ```
pub fn summarize_circuit(circuit: &QuantumCircuit) -> CircuitSummary {
    // Tally gate classes in a single pass over the gate list.
    let (mut clifford, mut non_clifford, mut measurements) = (0usize, 0usize, 0usize);
    for gate in circuit.gates() {
        match classify_gate(gate) {
            GateClass::Clifford => clifford += 1,
            GateClass::NonClifford => non_clifford += 1,
            GateClass::Measurement => measurements += 1,
            // Resets and barriers are counted in total_gates only.
            GateClass::Reset | GateClass::Barrier => {}
        }
    }
    CircuitSummary {
        num_qubits: circuit.num_qubits(),
        depth: circuit.depth(),
        total_gates: circuit.gate_count(),
        clifford_count: clifford,
        non_clifford_count: non_clifford,
        measurement_count: measurements,
        is_clifford_only: non_clifford == 0,
        is_nearest_neighbor: is_nearest_neighbor(circuit),
        estimated_bond_dim: estimate_bond_dimension(circuit),
        state_vector_memory: format_sv_memory(circuit.num_qubits()),
    }
}
/// Format the state-vector memory requirement for display.
///
/// A dense state vector stores 2^n complex amplitudes at 16 bytes each
/// (two `f64` components), i.e. 2^(n+4) bytes in total. Registers too
/// large for that to fit in a `u128` (n >= 124) are reported symbolically
/// as a power of two instead of overflowing -- this crate routinely
/// summarises stabilizer circuits with far more than 124 qubits.
fn format_sv_memory(num_qubits: u32) -> String {
    // 2^num_qubits amplitudes * 16 bytes = 2^(num_qubits + 4) bytes.
    // checked_shl avoids the shift/multiply overflow that
    // `(1u128 << num_qubits) * 16` hits for num_qubits >= 124.
    let bytes = match 1u128.checked_shl(num_qubits.saturating_add(4)) {
        Some(b) => b,
        // Beyond u128 range: report the exact power of two directly.
        None => return format!("2^{} bytes", num_qubits as u64 + 4),
    };
    if bytes >= 1 << 40 {
        format!("{:.1} TiB", bytes as f64 / (1u128 << 40) as f64)
    } else if bytes >= 1 << 30 {
        format!("{:.1} GiB", bytes as f64 / (1u128 << 30) as f64)
    } else if bytes >= 1 << 20 {
        format!("{:.1} MiB", bytes as f64 / (1u128 << 20) as f64)
    } else {
        format!("{} bytes", bytes)
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::circuit::QuantumCircuit;
    // Exhaustively pin the class of every Gate variant so that a change to
    // classify_gate's mapping is caught immediately.
    #[test]
    fn classify_all_gate_types() {
        assert_eq!(classify_gate(&Gate::H(0)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::X(0)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::Y(0)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::Z(0)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::S(0)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::Sdg(0)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::CNOT(0, 1)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::CZ(0, 1)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::SWAP(0, 1)), GateClass::Clifford);
        assert_eq!(classify_gate(&Gate::T(0)), GateClass::NonClifford);
        assert_eq!(classify_gate(&Gate::Tdg(0)), GateClass::NonClifford);
        assert_eq!(classify_gate(&Gate::Rx(0, 1.0)), GateClass::NonClifford);
        assert_eq!(classify_gate(&Gate::Ry(0, 1.0)), GateClass::NonClifford);
        assert_eq!(classify_gate(&Gate::Rz(0, 1.0)), GateClass::NonClifford);
        assert_eq!(classify_gate(&Gate::Phase(0, 1.0)), GateClass::NonClifford);
        assert_eq!(classify_gate(&Gate::Rzz(0, 1, 1.0)), GateClass::NonClifford);
        assert_eq!(classify_gate(&Gate::Measure(0)), GateClass::Measurement);
        assert_eq!(classify_gate(&Gate::Reset(0)), GateClass::Reset);
        assert_eq!(classify_gate(&Gate::Barrier), GateClass::Barrier);
    }
    // Measurements do not break Clifford-compatibility; a single T gate does.
    #[test]
    fn clifford_circuit_detection() {
        let mut circ = QuantumCircuit::new(4);
        circ.h(0).cnot(0, 1).s(2).cz(2, 3).measure(0);
        assert!(is_clifford_circuit(&circ));
        circ.t(0);
        assert!(!is_clifford_circuit(&circ));
    }
    #[test]
    fn non_clifford_count() {
        let mut circ = QuantumCircuit::new(3);
        // Two T gates plus one rotation; the H is Clifford.
        circ.h(0).t(0).t(1).rx(2, 0.5);
        assert_eq!(count_non_clifford(&circ), 3);
    }
    #[test]
    fn entanglement_pair_tracking() {
        let mut circ = QuantumCircuit::new(5);
        circ.cnot(0, 3).cz(1, 4).swap(0, 3);
        let pairs = entanglement_pairs(&circ);
        assert!(pairs.contains(&(0, 3)));
        assert!(pairs.contains(&(1, 4)));
        // Duplicate pair (0,3) should not increase count.
        assert_eq!(pairs.len(), 2);
    }
    #[test]
    fn nearest_neighbor_detection() {
        let mut circ = QuantumCircuit::new(4);
        circ.cnot(0, 1).cnot(1, 2).cnot(2, 3);
        assert!(is_nearest_neighbor(&circ));
        // Distance-3 interaction breaks the nearest-neighbor property.
        circ.cnot(0, 3);
        assert!(!is_nearest_neighbor(&circ));
    }
    // No entangling gates: bond dimension 2^0 = 1.
    #[test]
    fn bond_dimension_empty_circuit() {
        let circ = QuantumCircuit::new(5);
        assert_eq!(estimate_bond_dimension(&circ), 1);
    }
    #[test]
    fn bond_dimension_linear_chain() {
        let mut circ = QuantumCircuit::new(4);
        // Single CNOT across cut at position 2: only one gate crosses.
        circ.cnot(1, 2);
        // Expected: 2^1 = 2
        assert_eq!(estimate_bond_dimension(&circ), 2);
    }
    #[test]
    fn bond_dimension_multiple_crossings() {
        let mut circ = QuantumCircuit::new(4);
        // Three gates cross the cut between qubit 1 and qubit 2.
        circ.cnot(0, 2).cnot(1, 3).cnot(0, 3);
        // Cut at position 2: all three gates cross -> 2^3 = 8
        assert_eq!(estimate_bond_dimension(&circ), 8);
    }
    #[test]
    fn summary_basic() {
        let mut circ = QuantumCircuit::new(4);
        circ.h(0).t(1).cnot(0, 1).measure(0).measure(1);
        let summary = summarize_circuit(&circ);
        assert_eq!(summary.num_qubits, 4);
        assert_eq!(summary.total_gates, 5);
        assert_eq!(summary.clifford_count, 2); // H + CNOT
        assert_eq!(summary.non_clifford_count, 1); // T
        assert_eq!(summary.measurement_count, 2);
        assert!(!summary.is_clifford_only);
        assert!(summary.is_nearest_neighbor);
    }
    #[test]
    fn summary_clifford_only_flag() {
        let mut circ = QuantumCircuit::new(2);
        circ.h(0).cnot(0, 1);
        let summary = summarize_circuit(&circ);
        assert!(summary.is_clifford_only);
    }
    #[test]
    fn summary_memory_string() {
        let circ = QuantumCircuit::new(10);
        let summary = summarize_circuit(&circ);
        // 2^10 * 16 = 16384 bytes
        assert_eq!(summary.state_vector_memory, "16384 bytes");
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,931 @@
//! Confidence bounds, statistical tests, and convergence utilities for
//! quantum measurement analysis.
//!
//! This module provides tools for reasoning about the statistical quality of
//! shot-based quantum simulation results, including confidence intervals for
//! binomial proportions, expectation values, shot budget estimation, distribution
//! distance metrics, goodness-of-fit tests, and convergence monitoring.
use std::collections::HashMap;
// ---------------------------------------------------------------------------
// Core types
// ---------------------------------------------------------------------------
/// A confidence interval around a point estimate.
#[derive(Debug, Clone)]
pub struct ConfidenceInterval {
    /// Lower bound of the interval (constructors clamp it to the estimate's
    /// natural range, e.g. [0, 1] for proportions, [-1, 1] for <Z>).
    pub lower: f64,
    /// Upper bound of the interval (clamped the same way as `lower`).
    pub upper: f64,
    /// Point estimate (e.g., sample proportion).
    pub point_estimate: f64,
    /// Confidence level, e.g., 0.95 for a 95 % interval.
    pub confidence_level: f64,
    /// Human-readable label for the method used (e.g. "wilson").
    pub method: &'static str,
}
/// Result of a chi-squared goodness-of-fit test.
#[derive(Debug, Clone)]
pub struct ChiSquaredResult {
    /// The chi-squared statistic: sum over categories of (O - E)^2 / E.
    pub statistic: f64,
    /// Degrees of freedom (number of categories minus one, floored at 1).
    pub degrees_of_freedom: usize,
    /// Approximate p-value (Wilson-Hilferty normal approximation).
    pub p_value: f64,
    /// Whether the result is significant at the 0.05 level.
    pub significant: bool,
}
/// Tracks a running sequence of estimates and detects convergence.
pub struct ConvergenceMonitor {
    // Estimates recorded so far, in arrival order.
    estimates: Vec<f64>,
    // Length of the trailing window used by the convergence check
    // (the methods are defined elsewhere -- confirm against the impl).
    window_size: usize,
}
// ---------------------------------------------------------------------------
// Helpers: inverse normal CDF (z-score)
// ---------------------------------------------------------------------------
/// Approximate the z-score (inverse standard-normal CDF) for a given two-sided
/// confidence level using the rational approximation of Abramowitz & Stegun
/// (formula 26.2.23).
///
/// For confidence level `c`, we compute the upper quantile at
/// `p = (1 + c) / 2` and return the corresponding z-value. The approximation
/// has absolute error below 4.5e-4.
///
/// # Panics
///
/// Panics if `confidence` is not in the open interval (0, 1).
pub fn z_score(confidence: f64) -> f64 {
    assert!(
        confidence > 0.0 && confidence < 1.0,
        "confidence must be in (0, 1)"
    );
    // Upper quantile of a two-sided interval; its tail area is 1 - p.
    let p = (1.0 + confidence) / 2.0;
    let tail = 1.0 - p;
    // A&S 26.2.23: t = sqrt(-2 ln(tail)), then subtract a rational
    // correction in t (evaluated here in Horner form).
    let t = (-2.0_f64 * tail.ln()).sqrt();
    let numer = 2.515517 + t * (0.802853 + t * 0.010328);
    let denom = 1.0 + t * (1.432788 + t * (0.189269 + t * 0.001308));
    t - numer / denom
}
// ---------------------------------------------------------------------------
// Wilson score interval
// ---------------------------------------------------------------------------
/// Compute the Wilson score confidence interval for a binomial proportion.
///
/// The Wilson interval is centred near the MLE but accounts for the discrete
/// nature of the binomial and never produces bounds outside [0, 1].
///
/// # Arguments
///
/// * `successes` -- number of successes observed.
/// * `trials` -- total number of trials (must be > 0).
/// * `confidence` -- desired confidence level in (0, 1).
///
/// # Panics
///
/// Panics if `trials == 0` or `confidence` is outside (0, 1).
pub fn wilson_interval(successes: usize, trials: usize, confidence: f64) -> ConfidenceInterval {
    assert!(trials > 0, "trials must be > 0");
    assert!(
        confidence > 0.0 && confidence < 1.0,
        "confidence must be in (0, 1)"
    );
    let n = trials as f64;
    let p_hat = successes as f64 / n;
    let z = z_score(confidence);
    let z2 = z * z;
    // Both the recentred estimate and its half-width share the Wilson
    // denominator (1 + z^2/n).
    let denom = 1.0 + z2 / n;
    let centre = (p_hat + z2 / (2.0 * n)) / denom;
    let half_width = z * (p_hat * (1.0 - p_hat) / n + z2 / (4.0 * n * n)).sqrt() / denom;
    ConfidenceInterval {
        lower: (centre - half_width).max(0.0),
        upper: (centre + half_width).min(1.0),
        point_estimate: p_hat,
        confidence_level: confidence,
        method: "wilson",
    }
}
// ---------------------------------------------------------------------------
// Clopper-Pearson exact interval
// ---------------------------------------------------------------------------
/// Compute the Clopper-Pearson (exact) confidence interval for a binomial
/// proportion via bisection on the binomial CDF.
///
/// This interval is conservative -- it guarantees at least the nominal coverage
/// probability, but may be wider than necessary.
///
/// # Arguments
///
/// * `successes` -- number of successes observed.
/// * `trials` -- total number of trials (must be > 0).
/// * `confidence` -- desired confidence level in (0, 1).
///
/// # Panics
///
/// Panics if `trials == 0` or `confidence` is outside (0, 1).
pub fn clopper_pearson(successes: usize, trials: usize, confidence: f64) -> ConfidenceInterval {
    assert!(trials > 0, "trials must be > 0");
    assert!(
        confidence > 0.0 && confidence < 1.0,
        "confidence must be in (0, 1)"
    );
    let alpha = 1.0 - confidence;
    // Lower limit: the p with P(X >= k | n, p) = alpha/2, i.e.
    // P(X <= k-1 | n, p) = 1 - alpha/2; pinned to 0 when k = 0.
    let lower = match successes {
        0 => 0.0,
        k => bisect_binomial_cdf(trials, k - 1, 1.0 - alpha / 2.0),
    };
    // Upper limit: the p with P(X <= k | n, p) = alpha/2; pinned to 1
    // when every trial succeeded.
    let upper = if successes == trials {
        1.0
    } else {
        bisect_binomial_cdf(trials, successes, alpha / 2.0)
    };
    ConfidenceInterval {
        lower,
        upper,
        point_estimate: successes as f64 / trials as f64,
        confidence_level: confidence,
        method: "clopper-pearson",
    }
}
/// Use bisection to find `p` such that `binomial_cdf(n, k, p) = target`.
///
/// `binomial_cdf(n, k, p)` = sum_{i=0}^{k} C(n,i) p^i (1-p)^{n-i}.
///
/// For fixed `k < n` the CDF is strictly *decreasing* in `p` (raising the
/// per-trial success probability shifts mass above `k`), so bisection
/// maintains the bracket invariant `cdf(lo) >= target >= cdf(hi)`.
fn bisect_binomial_cdf(n: usize, k: usize, target: f64) -> f64 {
    let mut lo = 0.0_f64;
    let mut hi = 1.0_f64;
    for _ in 0..200 {
        let mid = (lo + hi) / 2.0;
        let cdf = binomial_cdf(n, k, mid);
        if cdf < target {
            // CDF is monotonically decreasing in p, so a too-small CDF
            // means `mid` is too large: shrink the upper bound.
            hi = mid;
        } else {
            lo = mid;
        }
        // 200 halvings of [0, 1] more than exhaust f64 resolution; stop
        // early once the bracket has collapsed.
        if (hi - lo) < 1e-15 {
            break;
        }
    }
    (lo + hi) / 2.0
}
/// Evaluate the binomial CDF: P(X <= k) where X ~ Bin(n, p).
///
/// Terms are accumulated in log-space -- log C(n, i) is built incrementally --
/// to avoid overflow of binomial coefficients for large `n`. Quantum shot
/// counts are at most millions, and the bisection caller needs only ~200
/// evaluations, so direct summation is fast enough.
fn binomial_cdf(n: usize, k: usize, p: f64) -> f64 {
    // Degenerate cases: p = 0 puts all mass at X = 0, and k >= n covers the
    // entire support; either way the CDF is exactly 1.
    if p <= 0.0 || k >= n {
        return 1.0;
    }
    // p = 1 (with k < n) puts all mass at X = n, strictly above k.
    if p >= 1.0 {
        return 0.0;
    }
    let ln_p = p.ln();
    let ln_q = (1.0 - p).ln();
    // i = 0 term: C(n,0) p^0 (1-p)^n, with log C(n,0) = 0.
    let mut ln_coeff = 0.0_f64;
    let mut total = (ln_q * n as f64).exp();
    for i in 1..=k {
        // log C(n, i) = log C(n, i-1) + log(n - i + 1) - log(i)
        ln_coeff += ((n - i + 1) as f64).ln() - (i as f64).ln();
        total += (ln_coeff + ln_p * i as f64 + ln_q * (n - i) as f64).exp();
    }
    // Clamp tiny numerical excursions back into [0, 1].
    total.clamp(0.0, 1.0)
}
// ---------------------------------------------------------------------------
// Expectation value confidence interval
// ---------------------------------------------------------------------------
/// Compute a confidence interval for the expectation value <Z> of a given
/// qubit from shot counts.
///
/// For qubit `q`, the Z expectation value is `P(0) - P(1)`, where P(0)/P(1)
/// are the fractions of shots in which qubit `q` measured `false`/`true`.
///
/// Each shot contributes +1 (outcome 0) or -1 (outcome 1), so a single shot
/// has variance `1 - <Z>^2` and the standard error over `n` shots is
/// `sqrt((1 - <Z>^2) / n)`. The returned interval is `<Z> +/- z * SE`,
/// clamped to [-1, 1].
///
/// # Panics
///
/// Panics if `confidence` is outside (0, 1) or if no shot covers `qubit`.
pub fn expectation_confidence(
    counts: &HashMap<Vec<bool>, usize>,
    qubit: u32,
    confidence: f64,
) -> ConfidenceInterval {
    assert!(
        confidence > 0.0 && confidence < 1.0,
        "confidence must be in (0, 1)"
    );
    // Split the shots by the measured value of the requested qubit;
    // bitstrings shorter than the qubit index are ignored.
    let (mut zeros, mut ones) = (0usize, 0usize);
    for (bits, &count) in counts {
        match bits.get(qubit as usize).copied() {
            Some(true) => ones += count,
            Some(false) => zeros += count,
            None => {}
        }
    }
    let total = (zeros + ones) as f64;
    assert!(total > 0.0, "no shots found for the given qubit");
    // <Z> = P(0) - P(1).
    let exp_z = zeros as f64 / total - ones as f64 / total;
    // Per-shot variance of a +/-1 variable: E[X^2] - E[X]^2 = 1 - <Z>^2.
    let var_single = 1.0 - exp_z * exp_z;
    let se = (var_single / total).sqrt();
    let z = z_score(confidence);
    ConfidenceInterval {
        lower: (exp_z - z * se).max(-1.0),
        upper: (exp_z + z * se).min(1.0),
        point_estimate: exp_z,
        confidence_level: confidence,
        method: "expectation-z-se",
    }
}
// ---------------------------------------------------------------------------
// Shot budget calculator
// ---------------------------------------------------------------------------
/// Compute the minimum number of shots required so that the additive error of
/// an empirical probability is at most `epsilon` with probability at least
/// `1 - delta`, using the Hoeffding bound.
///
/// Formula: N >= ln(2 / delta) / (2 * epsilon^2)
///
/// # Panics
///
/// Panics if `epsilon` or `delta` is not in (0, 1).
pub fn required_shots(epsilon: f64, delta: f64) -> usize {
    assert!(epsilon > 0.0 && epsilon < 1.0, "epsilon must be in (0, 1)");
    assert!(delta > 0.0 && delta < 1.0, "delta must be in (0, 1)");
    // Hoeffding: P(|p_hat - p| > eps) <= 2 exp(-2 N eps^2); solve for N
    // at failure probability delta and round up to a whole shot count.
    let shots = (2.0_f64 / delta).ln() / (2.0 * epsilon * epsilon);
    shots.ceil() as usize
}
// ---------------------------------------------------------------------------
// Total variation distance
// ---------------------------------------------------------------------------
/// Compute the total variation distance between two empirical distributions
/// given as shot-count histograms.
///
/// TVD = 0.5 * sum_i |p_i - q_i| over all bitstrings present in either
/// distribution. An empty histogram is treated as the all-zero distribution
/// (every frequency 0); two empty histograms have distance 0.
pub fn total_variation_distance(
    p: &HashMap<Vec<bool>, usize>,
    q: &HashMap<Vec<bool>, usize>,
) -> f64 {
    // Empirical frequency of `key` in `counts` given its total shot count.
    fn freq(counts: &HashMap<Vec<bool>, usize>, total: f64, key: &[bool]) -> f64 {
        if total > 0.0 {
            counts.get(key).copied().unwrap_or(0) as f64 / total
        } else {
            0.0
        }
    }
    let n_p = p.values().sum::<usize>() as f64;
    let n_q = q.values().sum::<usize>() as f64;
    // Two empty histograms are trivially identical.
    if n_p == 0.0 && n_q == 0.0 {
        return 0.0;
    }
    // Key union: every key of p, plus the keys that appear only in q.
    let union = p.keys().chain(q.keys().filter(|k| !p.contains_key(*k)));
    let mut abs_diff_sum = 0.0_f64;
    for key in union {
        abs_diff_sum += (freq(p, n_p, key) - freq(q, n_q, key)).abs();
    }
    abs_diff_sum / 2.0
}
// ---------------------------------------------------------------------------
// Chi-squared test
// ---------------------------------------------------------------------------
/// Perform a chi-squared goodness-of-fit test comparing an observed
/// distribution to an expected distribution.
///
/// The expected distribution is scaled to match the total number of observed
/// counts. The p-value is approximated using the Wilson-Hilferty cube-root
/// transformation of the chi-squared CDF.
///
/// NOTE(review): categories whose scaled expected count is zero are skipped
/// entirely, so observed outcomes the expected distribution deems impossible
/// do NOT inflate the statistic -- confirm this is the intended convention.
///
/// # Panics
///
/// Panics if there are no categories or if the expected distribution has zero
/// total counts.
pub fn chi_squared_test(
    observed: &HashMap<Vec<bool>, usize>,
    expected: &HashMap<Vec<bool>, usize>,
) -> ChiSquaredResult {
    let total_observed: f64 = observed.values().sum::<usize>() as f64;
    let total_expected: f64 = expected.values().sum::<usize>() as f64;
    assert!(
        total_expected > 0.0,
        "expected distribution must have nonzero total"
    );
    // Collect the key union: every observed key, plus expected-only keys.
    let mut all_keys: Vec<&Vec<bool>> = Vec::new();
    for key in observed.keys() {
        all_keys.push(key);
    }
    for key in expected.keys() {
        if !observed.contains_key(key) {
            all_keys.push(key);
        }
    }
    // Accumulate sum of (O - E)^2 / E over categories with E > 0.
    let mut statistic = 0.0_f64;
    let mut num_categories = 0_usize;
    for key in &all_keys {
        let o = *observed.get(*key).unwrap_or(&0) as f64;
        // Scale expected counts to match observed total.
        let e_raw = *expected.get(*key).unwrap_or(&0) as f64;
        let e = e_raw * total_observed / total_expected;
        if e > 0.0 {
            statistic += (o - e) * (o - e) / e;
            num_categories += 1;
        }
    }
    // Degrees of freedom = categories - 1, floored at 1 so the survival
    // function below is always well defined.
    let df = if num_categories > 1 {
        num_categories - 1
    } else {
        1
    };
    let p_value = chi_squared_survival(statistic, df);
    ChiSquaredResult {
        statistic,
        degrees_of_freedom: df,
        p_value,
        // Conventional 5 % significance threshold.
        significant: p_value < 0.05,
    }
}
/// Approximate the survival function (1 - CDF) of the chi-squared distribution
/// using the Wilson-Hilferty normal approximation.
///
/// For a chi-squared random variable X with k degrees of freedom,
/// (X/k)^{1/3} is approximately normal with mean 1 - 2/(9k) and variance
/// 2/(9k), so P(X > x) ~ Phi(-z) with the standardized cube-root z-score.
fn chi_squared_survival(x: f64, df: usize) -> f64 {
    if df == 0 {
        // Degenerate distribution: all probability mass sits at zero.
        return if x > 0.0 { 0.0 } else { 1.0 };
    }
    if x <= 0.0 {
        return 1.0;
    }
    let k = df as f64;
    let variance = 2.0 / (9.0 * k);
    let transformed = (x / k).powf(1.0 / 3.0);
    // Standardize the cube-root-transformed variable and take the upper tail.
    normal_cdf(-((transformed - (1.0 - variance)) / variance.sqrt()))
}
/// Approximate the standard normal CDF via the Abramowitz & Stegun
/// rational tail approximation (formula 26.2.17), accurate to ~1e-7.
fn normal_cdf(x: f64) -> f64 {
    let ax = x.abs();
    let t = 1.0 / (1.0 + 0.2316419 * ax);
    // Standard normal density at |x|; 0.3989... = 1/sqrt(2*pi).
    let pdf = 0.3989422804014327 * (-ax * ax / 2.0).exp();
    // Horner evaluation of b1*t + b2*t^2 + ... + b5*t^5.
    let coeffs = [0.319381530, -0.356563782, 1.781477937, -1.821255978, 1.330274429];
    let mut poly = 0.0;
    for &c in coeffs.iter().rev() {
        poly = poly * t + c;
    }
    poly *= t;
    // pdf * poly approximates the upper tail P(Z > |x|).
    let tail = pdf * poly;
    if x < 0.0 { tail } else { 1.0 - tail }
}
// ---------------------------------------------------------------------------
// Convergence monitor
// ---------------------------------------------------------------------------
impl ConvergenceMonitor {
    /// Create a new monitor with the given window size.
    ///
    /// The sequence is considered converged once the most recent
    /// `window_size` estimates all lie within `epsilon` of each other.
    pub fn new(window_size: usize) -> Self {
        assert!(window_size > 0, "window_size must be > 0");
        Self {
            window_size,
            estimates: Vec::new(),
        }
    }
    /// Record a new estimate at the end of the sequence.
    pub fn add_estimate(&mut self, value: f64) {
        self.estimates.push(value);
    }
    /// True when the spread (max - min) of the trailing `window_size`
    /// estimates is strictly less than `epsilon`. Always false while fewer
    /// than `window_size` estimates have been recorded.
    pub fn has_converged(&self, epsilon: f64) -> bool {
        let n = self.estimates.len();
        if n < self.window_size {
            return false;
        }
        let mut lo = f64::INFINITY;
        let mut hi = f64::NEG_INFINITY;
        for &v in &self.estimates[n - self.window_size..] {
            lo = lo.min(v);
            hi = hi.max(v);
        }
        hi - lo < epsilon
    }
    /// The most recent estimate, or `None` if none have been recorded.
    pub fn current_estimate(&self) -> Option<f64> {
        self.estimates.last().map(|&v| v)
    }
}
// ===========================================================================
// Tests
// ===========================================================================
/// Unit tests for the shot-statistics utilities: confidence intervals,
/// required-shot bounds, distribution distances, and convergence monitoring.
#[cfg(test)]
mod tests {
    use super::*;
    // -----------------------------------------------------------------------
    // z_score
    // -----------------------------------------------------------------------
    #[test]
    fn z_score_95() {
        let z = z_score(0.95);
        assert!(
            (z - 1.96).abs() < 0.01,
            "z_score(0.95) = {z}, expected ~1.96"
        );
    }
    #[test]
    fn z_score_99() {
        let z = z_score(0.99);
        assert!(
            (z - 2.576).abs() < 0.02,
            "z_score(0.99) = {z}, expected ~2.576"
        );
    }
    #[test]
    fn z_score_90() {
        let z = z_score(0.90);
        assert!(
            (z - 1.645).abs() < 0.01,
            "z_score(0.90) = {z}, expected ~1.645"
        );
    }
    // -----------------------------------------------------------------------
    // Wilson interval
    // -----------------------------------------------------------------------
    #[test]
    fn wilson_contains_true_proportion() {
        // 50 successes out of 100 trials, true p = 0.5
        let ci = wilson_interval(50, 100, 0.95);
        assert!(
            ci.lower < 0.5 && ci.upper > 0.5,
            "Wilson CI should contain 0.5: {ci:?}"
        );
        assert_eq!(ci.method, "wilson");
        assert!((ci.point_estimate - 0.5).abs() < 1e-12);
    }
    #[test]
    fn wilson_asymmetric() {
        // 1 success out of 100 -- the interval should still be reasonable.
        let ci = wilson_interval(1, 100, 0.95);
        assert!(ci.lower >= 0.0);
        assert!(ci.upper <= 1.0);
        assert!(ci.lower < 0.01);
        assert!(ci.upper > 0.01);
    }
    #[test]
    fn wilson_zero_successes() {
        // With no successes the lower bound collapses to exactly 0.
        let ci = wilson_interval(0, 100, 0.95);
        assert_eq!(ci.lower, 0.0);
        assert!(ci.upper > 0.0);
        assert!((ci.point_estimate - 0.0).abs() < 1e-12);
    }
    // -----------------------------------------------------------------------
    // Clopper-Pearson
    // -----------------------------------------------------------------------
    #[test]
    fn clopper_pearson_contains_true_proportion() {
        let ci = clopper_pearson(50, 100, 0.95);
        assert!(
            ci.lower < 0.5 && ci.upper > 0.5,
            "Clopper-Pearson CI should contain 0.5: {ci:?}"
        );
        assert_eq!(ci.method, "clopper-pearson");
    }
    #[test]
    fn clopper_pearson_is_conservative() {
        // Clopper-Pearson should be wider than Wilson for the same data.
        let cp = clopper_pearson(50, 100, 0.95);
        let w = wilson_interval(50, 100, 0.95);
        let cp_width = cp.upper - cp.lower;
        let w_width = w.upper - w.lower;
        assert!(
            cp_width >= w_width - 1e-10,
            "Clopper-Pearson width ({cp_width}) should be >= Wilson width ({w_width})"
        );
    }
    #[test]
    fn clopper_pearson_edge_zero() {
        let ci = clopper_pearson(0, 100, 0.95);
        assert_eq!(ci.lower, 0.0);
        assert!(ci.upper > 0.0);
    }
    #[test]
    fn clopper_pearson_edge_all() {
        let ci = clopper_pearson(100, 100, 0.95);
        assert_eq!(ci.upper, 1.0);
        assert!(ci.lower < 1.0);
    }
    // -----------------------------------------------------------------------
    // Expectation value confidence
    // -----------------------------------------------------------------------
    #[test]
    fn expectation_all_zero() {
        // All shots measure |0>: <Z> = 1.0
        let mut counts = HashMap::new();
        counts.insert(vec![false], 1000);
        let ci = expectation_confidence(&counts, 0, 0.95);
        assert!((ci.point_estimate - 1.0).abs() < 1e-12);
        assert!(ci.lower <= 1.0);
        assert!(ci.upper >= 1.0 - 1e-6);
    }
    #[test]
    fn expectation_all_one() {
        // All shots measure |1>: <Z> = -1.0
        let mut counts = HashMap::new();
        counts.insert(vec![true], 1000);
        let ci = expectation_confidence(&counts, 0, 0.95);
        assert!((ci.point_estimate - (-1.0)).abs() < 1e-12);
    }
    #[test]
    fn expectation_balanced() {
        // Equal |0> and |1>: <Z> = 0.0
        let mut counts = HashMap::new();
        counts.insert(vec![false], 500);
        counts.insert(vec![true], 500);
        let ci = expectation_confidence(&counts, 0, 0.95);
        assert!(
            ci.point_estimate.abs() < 1e-12,
            "expected 0.0, got {}",
            ci.point_estimate
        );
        assert!(ci.lower < 0.0);
        assert!(ci.upper > 0.0);
    }
    #[test]
    fn expectation_multi_qubit() {
        // Two-qubit system: qubit 0 always |0>, qubit 1 always |1>
        let mut counts = HashMap::new();
        counts.insert(vec![false, true], 1000);
        let ci0 = expectation_confidence(&counts, 0, 0.95);
        let ci1 = expectation_confidence(&counts, 1, 0.95);
        assert!((ci0.point_estimate - 1.0).abs() < 1e-12);
        assert!((ci1.point_estimate - (-1.0)).abs() < 1e-12);
    }
    // -----------------------------------------------------------------------
    // Required shots
    // -----------------------------------------------------------------------
    #[test]
    fn required_shots_standard() {
        // Hoeffding bound: n = ln(2/delta) / (2 * epsilon^2).
        let n = required_shots(0.01, 0.05);
        // ln(2/0.05) / (2 * 0.01^2) = ln(40) / 0.0002 = 3.6889 / 0.0002 = 18444.7
        assert!(
            (n as i64 - 18445).abs() <= 1,
            "required_shots(0.01, 0.05) = {n}, expected ~18445"
        );
    }
    #[test]
    fn required_shots_loose() {
        let n = required_shots(0.1, 0.1);
        // ln(20) / 0.02 = 2.9957 / 0.02 = 149.79 -> 150
        assert!(n >= 149 && n <= 151, "expected ~150, got {n}");
    }
    // -----------------------------------------------------------------------
    // Total variation distance
    // -----------------------------------------------------------------------
    #[test]
    fn tvd_identical() {
        let mut p = HashMap::new();
        p.insert(vec![false, false], 250);
        p.insert(vec![false, true], 250);
        p.insert(vec![true, false], 250);
        p.insert(vec![true, true], 250);
        let tvd = total_variation_distance(&p, &p);
        assert!(
            tvd.abs() < 1e-12,
            "TVD of identical distributions should be 0, got {tvd}"
        );
    }
    #[test]
    fn tvd_completely_different() {
        // Disjoint supports give the maximal distance of 1.
        let mut p = HashMap::new();
        p.insert(vec![false], 1000);
        let mut q = HashMap::new();
        q.insert(vec![true], 1000);
        let tvd = total_variation_distance(&p, &q);
        assert!(
            (tvd - 1.0).abs() < 1e-12,
            "TVD of completely different distributions should be 1.0, got {tvd}"
        );
    }
    #[test]
    fn tvd_partial_overlap() {
        let mut p = HashMap::new();
        p.insert(vec![false], 600);
        p.insert(vec![true], 400);
        let mut q = HashMap::new();
        q.insert(vec![false], 400);
        q.insert(vec![true], 600);
        let tvd = total_variation_distance(&p, &q);
        // |0.6 - 0.4| + |0.4 - 0.6| = 0.4, times 0.5 = 0.2
        assert!((tvd - 0.2).abs() < 1e-12, "expected 0.2, got {tvd}");
    }
    #[test]
    fn tvd_empty() {
        let p: HashMap<Vec<bool>, usize> = HashMap::new();
        let q: HashMap<Vec<bool>, usize> = HashMap::new();
        let tvd = total_variation_distance(&p, &q);
        assert!(tvd.abs() < 1e-12);
    }
    // -----------------------------------------------------------------------
    // Chi-squared test
    // -----------------------------------------------------------------------
    #[test]
    fn chi_squared_matching() {
        // Observed matches expected perfectly.
        let mut obs = HashMap::new();
        obs.insert(vec![false, false], 250);
        obs.insert(vec![false, true], 250);
        obs.insert(vec![true, false], 250);
        obs.insert(vec![true, true], 250);
        let result = chi_squared_test(&obs, &obs);
        assert!(
            result.statistic < 1e-12,
            "statistic should be ~0 for identical distributions, got {}",
            result.statistic
        );
        assert!(
            result.p_value > 0.05,
            "p-value should be high for matching distributions, got {}",
            result.p_value
        );
        assert!(!result.significant);
    }
    #[test]
    fn chi_squared_very_different() {
        let mut obs = HashMap::new();
        obs.insert(vec![false], 1000);
        obs.insert(vec![true], 0);
        let mut exp = HashMap::new();
        exp.insert(vec![false], 500);
        exp.insert(vec![true], 500);
        let result = chi_squared_test(&obs, &exp);
        assert!(result.statistic > 100.0, "statistic should be large");
        assert!(
            result.p_value < 0.05,
            "p-value should be small: {}",
            result.p_value
        );
        assert!(result.significant);
    }
    #[test]
    fn chi_squared_degrees_of_freedom() {
        // Four categories -> 3 degrees of freedom.
        let mut obs = HashMap::new();
        obs.insert(vec![false, false], 100);
        obs.insert(vec![false, true], 100);
        obs.insert(vec![true, false], 100);
        obs.insert(vec![true, true], 100);
        let result = chi_squared_test(&obs, &obs);
        assert_eq!(result.degrees_of_freedom, 3);
    }
    // -----------------------------------------------------------------------
    // Convergence monitor
    // -----------------------------------------------------------------------
    #[test]
    fn convergence_detects_stable() {
        let mut monitor = ConvergenceMonitor::new(5);
        // Add a sequence that stabilises.
        for &v in &[
            0.5, 0.52, 0.49, 0.501, 0.499, 0.5001, 0.4999, 0.5002, 0.4998, 0.5001,
        ] {
            monitor.add_estimate(v);
        }
        assert!(
            monitor.has_converged(0.01),
            "should have converged: last 5 values are within 0.01"
        );
    }
    #[test]
    fn convergence_rejects_unstable() {
        let mut monitor = ConvergenceMonitor::new(5);
        for &v in &[0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9] {
            monitor.add_estimate(v);
        }
        assert!(
            !monitor.has_converged(0.01),
            "should NOT have converged: values oscillate widely"
        );
    }
    #[test]
    fn convergence_insufficient_data() {
        let mut monitor = ConvergenceMonitor::new(10);
        monitor.add_estimate(1.0);
        monitor.add_estimate(1.0);
        assert!(
            !monitor.has_converged(0.1),
            "not enough data for window_size=10"
        );
    }
    #[test]
    fn convergence_current_estimate() {
        let mut monitor = ConvergenceMonitor::new(3);
        assert_eq!(monitor.current_estimate(), None);
        monitor.add_estimate(42.0);
        assert_eq!(monitor.current_estimate(), Some(42.0));
        monitor.add_estimate(43.0);
        assert_eq!(monitor.current_estimate(), Some(43.0));
    }
    // -----------------------------------------------------------------------
    // Binomial CDF helper
    // -----------------------------------------------------------------------
    #[test]
    fn binomial_cdf_edge_cases() {
        // P(X <= 10 | 10, 0.5) should be 1.0
        let c = binomial_cdf(10, 10, 0.5);
        assert!((c - 1.0).abs() < 1e-12);
        // P(X <= 0 | 10, 0.5) = (0.5)^10 ~ 0.000977
        let c = binomial_cdf(10, 0, 0.5);
        assert!((c - 0.0009765625).abs() < 1e-8);
    }
    // -----------------------------------------------------------------------
    // Normal CDF helper
    // -----------------------------------------------------------------------
    #[test]
    fn normal_cdf_values() {
        // Phi(0) = 0.5
        assert!((normal_cdf(0.0) - 0.5).abs() < 1e-6);
        // Phi(1.96) ~ 0.975
        assert!((normal_cdf(1.96) - 0.975).abs() < 0.002);
        // Phi(-1.96) ~ 0.025
        assert!((normal_cdf(-1.96) - 0.025).abs() < 0.002);
    }
}

View File

@@ -0,0 +1,683 @@
//! Hybrid classical-quantum control theory engine for QEC.
//!
//! Models the QEC feedback loop as a discrete-time control system:
//! `Physical qubits -> Syndrome extraction -> Classical decode -> Correction -> Repeat`
//!
//! If classical decoding latency exceeds the syndrome extraction period, errors
//! accumulate faster than they are corrected (the "backlog problem").
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
#[allow(unused_imports)]
use crate::error::{QuantumError, Result};
// -- 1. Control Loop Model --------------------------------------------------
/// Full QEC control loop: plant (quantum) + controller (classical) + state.
#[derive(Debug, Clone)]
pub struct QecControlLoop {
    /// Physical parameters of the quantum code being protected.
    pub plant: QuantumPlant,
    /// Performance characteristics of the classical decoder.
    pub controller: ClassicalController,
    /// Mutable state accumulated while the loop runs.
    pub state: ControlState,
}
/// Physical parameters of the quantum error-correction code.
#[derive(Debug, Clone)]
pub struct QuantumPlant {
    /// Code distance `d` of the error-correcting code.
    pub code_distance: u32,
    /// Per-qubit, per-round physical error probability.
    pub physical_error_rate: f64,
    /// Number of data qubits in the code block.
    pub num_data_qubits: u32,
    /// Qubit coherence time, in nanoseconds.
    pub coherence_time_ns: u64,
}
/// Classical decoder performance characteristics.
#[derive(Debug, Clone)]
pub struct ClassicalController {
    /// Nominal time to decode one round of syndrome data, in nanoseconds.
    pub decode_latency_ns: u64,
    /// Decoder throughput, in syndromes per second.
    pub decode_throughput: f64,
    /// Probability (0..=1) that an individual error is corrected in time.
    pub accuracy: f64,
}
/// Evolving state of the control loop during execution.
#[derive(Debug, Clone)]
pub struct ControlState {
    /// Logical error rate observed so far.
    pub logical_error_rate: f64,
    /// Errors generated but not yet corrected (the "backlog").
    pub error_backlog: f64,
    /// Number of syndrome rounds decoded so far.
    pub rounds_decoded: u64,
    /// Total classical decode latency accumulated, in nanoseconds.
    pub total_latency_ns: u64,
}
impl ControlState {
pub fn new() -> Self {
Self {
logical_error_rate: 0.0,
error_backlog: 0.0,
rounds_decoded: 0,
total_latency_ns: 0,
}
}
}
impl Default for ControlState {
    /// Equivalent to [`ControlState::new`]: an all-zero state.
    fn default() -> Self {
        Self::new()
    }
}
// -- 2. Stability Analysis ---------------------------------------------------
/// Result of analyzing the control loop's stability.
#[derive(Debug, Clone)]
pub struct StabilityCondition {
    /// True when the decoder keeps up: decode latency < syndrome period.
    pub is_stable: bool,
    /// Relative headroom `t_syndrome / t_decode - 1`; infinite for zero
    /// decode latency, negative when unstable.
    pub margin: f64,
    /// Decode latency (ns) at which the loop would become unstable
    /// (equal to the syndrome extraction period).
    pub critical_latency_ns: u64,
    /// Physical error rate bound, scaled by the decoder accuracy.
    pub critical_error_rate: f64,
    /// Estimated per-round backlog convergence rate (positive = shrinking).
    pub convergence_rate: f64,
}
/// Syndrome extraction period (ns) for a distance-d surface code:
/// 6 gate layers per cycle at ~20 ns per gate layer.
fn syndrome_period_ns(distance: u32) -> u64 {
    const LAYERS_PER_CYCLE: u64 = 6;
    const NS_PER_LAYER: u64 = 20;
    LAYERS_PER_CYCLE * u64::from(distance) * NS_PER_LAYER
}
/// Analyze stability: the loop is stable when `decode_latency < syndrome_period`.
pub fn analyze_stability(config: &QecControlLoop) -> StabilityCondition {
    let t_syndrome = syndrome_period_ns(config.plant.code_distance);
    let t_decode = config.controller.decode_latency_ns;
    // Relative headroom; a zero-latency decoder has unbounded margin.
    let margin = match t_decode {
        0 => f64::INFINITY,
        _ => (t_syndrome as f64 / t_decode as f64) - 1.0,
    };
    // Errors injected per round grow with both rate and distance.
    let error_injection =
        config.plant.physical_error_rate * (config.plant.code_distance as f64);
    let convergence_rate = if t_syndrome > 0 {
        1.0 - (t_decode as f64 / t_syndrome as f64) - error_injection
    } else {
        -1.0
    };
    StabilityCondition {
        is_stable: t_decode < t_syndrome,
        margin,
        critical_latency_ns: t_syndrome,
        critical_error_rate: 0.01 * config.controller.accuracy,
        convergence_rate,
    }
}
/// Maximum code distance stable for a given controller and physical error rate.
/// Scans odd distances 3, 5, 7, ... and returns the largest whose syndrome
/// period still exceeds the decode latency; 3 is the floor returned even when
/// no distance is actually stable or the error rate is above threshold.
pub fn max_stable_distance(controller: &ClassicalController, error_rate: f64) -> u32 {
    let mut best = 3u32;
    // The accuracy-scaled threshold test does not depend on the distance,
    // so it is hoisted out of the scan: above threshold the scan would have
    // stopped on its first iteration anyway, leaving the floor of 3.
    if error_rate >= 0.01 * controller.accuracy {
        return best;
    }
    for d in (3..=201).step_by(2) {
        // Stop at the first distance the decoder can no longer keep up with.
        if controller.decode_latency_ns >= syndrome_period_ns(d) {
            break;
        }
        best = d;
    }
    best
}
/// Minimum decoder throughput (syndromes/sec) to keep up with the plant:
/// one syndrome per extraction period.
pub fn min_throughput(plant: &QuantumPlant) -> f64 {
    match syndrome_period_ns(plant.code_distance) {
        // A zero-length period can never be kept up with.
        0 => f64::INFINITY,
        period_ns => 1e9 / period_ns as f64,
    }
}
// -- 3. Resource Optimization ------------------------------------------------
/// Available hardware resources.
#[derive(Debug, Clone)]
pub struct ResourceBudget {
    /// Total physical qubits available across all logical qubits.
    pub total_physical_qubits: u32,
    /// Number of classical CPU cores available for decoding.
    pub classical_cores: u32,
    /// Classical clock speed, in GHz.
    pub classical_clock_ghz: f64,
    /// Wall-clock time budget for the computation, in microseconds.
    pub total_time_budget_us: u64,
}
/// A candidate allocation on the Pareto frontier.
#[derive(Debug, Clone)]
pub struct OptimalAllocation {
    /// Surface-code distance used for every logical qubit.
    pub code_distance: u32,
    /// Number of logical qubits that fit the physical-qubit budget.
    pub logical_qubits: u32,
    /// Classical threads dedicated to decoding.
    pub decode_threads: u32,
    /// Predicted logical error rate at this distance.
    pub expected_logical_error_rate: f64,
    /// Higher is better: rewards more logical qubits and stronger
    /// error suppression.
    pub pareto_score: f64,
}
/// Enumerate Pareto-optimal resource allocations sorted by descending score.
///
/// Sweeps odd code distances 3..=99, keeping each distance that fits the
/// qubit budget, supports at least `min_logical` logical qubits, and allows
/// at least one full QEC round within the time budget.
pub fn optimize_allocation(
    budget: &ResourceBudget,
    error_rate: f64,
    min_logical: u32,
) -> Vec<OptimalAllocation> {
    let mut candidates = Vec::new();
    for d in (3u32..=99).step_by(2) {
        // Physical qubits per logical qubit: 2d^2 - 2d + 1 = d^2 + (d-1)^2.
        let qpl = 2 * d * d - 2 * d + 1;
        if qpl == 0 {
            continue;
        }
        let max_logical = budget.total_physical_qubits / qpl;
        if max_logical < min_logical {
            continue;
        }
        // Decode-time model: O(d^3) work spread over cores * clock; with no
        // classical resources the latency is effectively infinite.
        let decode_ns = if budget.classical_cores > 0 && budget.classical_clock_ghz > 0.0 {
            ((d as f64).powi(3) / (budget.classical_cores as f64 * budget.classical_clock_ghz))
                as u64
        } else {
            u64::MAX
        };
        let decode_threads = budget.classical_cores.min(max_logical);
        // Logical error model: p_L ~ 0.1 * (p / p_th)^((d+1)/2), capped at 1
        // when at or above the p_th = 1% threshold.
        let p_th = 0.01_f64;
        let ratio = error_rate / p_th;
        let exp = (d as f64 + 1.0) / 2.0;
        let p_logical = if ratio < 1.0 {
            0.1 * ratio.powf(exp)
        } else {
            1.0_f64.min(ratio.powf(exp))
        };
        // A round takes the slower of syndrome extraction and decoding;
        // skip distances where not even one round fits in the time budget.
        let t_syn = syndrome_period_ns(d);
        let round_time = t_syn.max(decode_ns);
        let budget_ns = budget.total_time_budget_us * 1000;
        if round_time == 0 || budget_ns / round_time == 0 {
            continue;
        }
        // Score: log2(logical qubits) plus orders of magnitude of error
        // suppression; the +15.0 branch rewards p_logical == 0 maximally.
        let score = if p_logical > 0.0 && max_logical > 0 {
            (max_logical as f64).log2() - p_logical.log10()
        } else if max_logical > 0 {
            (max_logical as f64).log2() + 15.0
        } else {
            0.0
        };
        candidates.push(OptimalAllocation {
            code_distance: d,
            logical_qubits: max_logical,
            decode_threads,
            expected_logical_error_rate: p_logical,
            pareto_score: score,
        });
    }
    // NaN-tolerant descending sort by Pareto score.
    candidates.sort_by(|a, b| {
        b.pareto_score
            .partial_cmp(&a.pareto_score)
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    candidates
}
// -- 4. Latency Budget Planner -----------------------------------------------
/// Breakdown of time budgets for a single QEC round.
#[derive(Debug, Clone)]
pub struct LatencyBudget {
    /// Time spent extracting the syndrome, in nanoseconds.
    pub syndrome_extraction_ns: u64,
    /// Time spent in the classical decoder, in nanoseconds.
    pub decode_ns: u64,
    /// Time spent applying the correction, in nanoseconds.
    pub correction_ns: u64,
    /// Total round duration: extraction + decode + correction.
    pub total_round_ns: u64,
    /// Extraction time minus (decode + correction); negative means the
    /// classical side cannot keep up with extraction.
    pub slack_ns: i64,
}
/// Plan the latency budget for one QEC round at the given distance and decode time.
pub fn plan_latency_budget(distance: u32, decode_ns_per_syndrome: u64) -> LatencyBudget {
    // Fixed cost of applying the correction pulse.
    const CORRECTION_NS: u64 = 20;
    let syndrome_extraction_ns = syndrome_period_ns(distance);
    LatencyBudget {
        syndrome_extraction_ns,
        decode_ns: decode_ns_per_syndrome,
        correction_ns: CORRECTION_NS,
        total_round_ns: syndrome_extraction_ns + decode_ns_per_syndrome + CORRECTION_NS,
        // Signed so callers can see by how much the classical side overruns.
        slack_ns: syndrome_extraction_ns as i64
            - (decode_ns_per_syndrome as i64 + CORRECTION_NS as i64),
    }
}
// -- 5. Backlog Simulator ----------------------------------------------------
/// Full trace of a simulated control loop execution.
#[derive(Debug, Clone)]
pub struct SimulationTrace {
    /// One snapshot per simulated round, in order.
    pub rounds: Vec<RoundSnapshot>,
    /// True when the final backlog is below one error.
    pub converged: bool,
    /// Fraction of rounds that produced a logical error.
    pub final_logical_error_rate: f64,
    /// Largest error backlog observed at any point in the run.
    pub max_backlog: f64,
}
/// Snapshot of a single simulation round.
#[derive(Debug, Clone)]
pub struct RoundSnapshot {
    /// Zero-based round index.
    pub round: u64,
    /// Physical errors injected this round.
    pub errors_this_round: u32,
    /// Errors the decoder corrected this round.
    pub errors_corrected: u32,
    /// Accumulated uncorrected errors after this round.
    pub backlog: f64,
    /// Actual (jittered) decode latency for this round, in nanoseconds.
    pub decode_latency_ns: u64,
}
/// Monte Carlo simulation of the QEC control loop with seeded RNG.
///
/// Each round: sample per-qubit errors, jitter the decode latency, apply
/// corrections only if the decoder finished within the syndrome period,
/// then update the backlog. The RNG call order is fixed, so a given seed
/// yields a fully deterministic trace.
pub fn simulate_control_loop(
    config: &QecControlLoop,
    num_rounds: u64,
    seed: u64,
) -> SimulationTrace {
    let mut rng = StdRng::seed_from_u64(seed);
    let d = config.plant.code_distance;
    let p = config.plant.physical_error_rate;
    let n_q = config.plant.num_data_qubits;
    let t_decode = config.controller.decode_latency_ns;
    let acc = config.controller.accuracy;
    let t_syn = syndrome_period_ns(d);
    let mut rounds = Vec::with_capacity(num_rounds as usize);
    let (mut backlog, mut max_backlog) = (0.0_f64, 0.0_f64);
    let mut logical_errors = 0u64;
    for r in 0..num_rounds {
        // Bernoulli(p) error draw for each data qubit.
        let mut errs: u32 = 0;
        for _ in 0..n_q {
            if rng.gen::<f64>() < p {
                errs += 1;
            }
        }
        // Uniform +/-20% jitter on the nominal decode latency.
        let jitter = 0.8 + 0.4 * rng.gen::<f64>();
        let actual_lat = (t_decode as f64 * jitter) as u64;
        let in_time = actual_lat < t_syn;
        // Corrections land only if the decoder beat the syndrome period;
        // each error is then independently corrected with probability `acc`.
        let corrected = if in_time {
            let mut c = 0u32;
            for _ in 0..errs {
                if rng.gen::<f64>() < acc {
                    c += 1;
                }
            }
            c
        } else {
            0
        };
        let uncorrected = errs.saturating_sub(corrected);
        backlog += uncorrected as f64;
        // A timely decoder also drains a fraction `acc` of the backlog.
        if in_time && backlog > 0.0 {
            backlog -= (backlog * acc).min(backlog);
        }
        if backlog > max_backlog {
            max_backlog = backlog;
        }
        // More than floor((d-1)/2) uncorrected errors exceeds the code's
        // correction capacity, so count it as a logical error.
        if uncorrected > (d.saturating_sub(1)) / 2 {
            logical_errors += 1;
        }
        rounds.push(RoundSnapshot {
            round: r,
            errors_this_round: errs,
            errors_corrected: corrected,
            backlog,
            decode_latency_ns: actual_lat,
        });
    }
    let final_logical_error_rate = if num_rounds > 0 {
        logical_errors as f64 / num_rounds as f64
    } else {
        0.0
    };
    SimulationTrace {
        rounds,
        converged: backlog < 1.0,
        final_logical_error_rate,
        max_backlog,
    }
}
// -- 6. Scaling Laws ---------------------------------------------------------
/// A power-law scaling relation: `y = prefactor * x^exponent`.
#[derive(Debug, Clone)]
pub struct ScalingLaw {
    /// Human-readable description of the relation.
    pub name: String,
    /// Power-law exponent.
    pub exponent: f64,
    /// Multiplicative prefactor.
    pub prefactor: f64,
}
/// Classical overhead scaling for a named decoder.
/// Known: `"union_find"` O(n), `"mwpm"` O(n^3), `"neural"` O(n). Default: O(n^2).
pub fn classical_overhead_scaling(decoder_name: &str) -> ScalingLaw {
    // Look up (name, exponent, prefactor) in one place, then build the law.
    let (name, exponent, prefactor) = match decoder_name {
        "union_find" => ("Union-Find decoder".to_string(), 1.0, 1.0),
        "mwpm" => ("Minimum Weight Perfect Matching".to_string(), 3.0, 0.5),
        "neural" => ("Neural network decoder".to_string(), 1.0, 10.0),
        other => (format!("Generic decoder ({})", other), 2.0, 1.0),
    };
    ScalingLaw {
        name,
        exponent,
        prefactor,
    }
}
/// Logical error rate scaling: p_L ~ prefactor * (p/p_th)^exponent per distance step.
/// Below threshold the exponent is the suppression factor lambda = -ln(p/p_th).
pub fn logical_error_scaling(physical_rate: f64, threshold: f64) -> ScalingLaw {
    // Non-positive inputs make the ratio meaningless.
    if threshold <= 0.0 || physical_rate <= 0.0 {
        return ScalingLaw {
            name: "Logical error scaling (degenerate)".into(),
            exponent: 0.0,
            prefactor: 1.0,
        };
    }
    // At or above threshold there is no suppression at all.
    if physical_rate >= threshold {
        return ScalingLaw {
            name: "Logical error scaling (above threshold)".into(),
            exponent: 0.0,
            prefactor: 1.0,
        };
    }
    ScalingLaw {
        name: "Logical error scaling (below threshold)".into(),
        exponent: -(physical_rate / threshold).ln(),
        prefactor: 0.1,
    }
}
// == Tests ===================================================================
/// Unit tests for the QEC control-loop model: stability analysis, resource
/// optimization, latency planning, backlog simulation, and scaling laws.
#[cfg(test)]
mod tests {
    use super::*;
    // Shared fixture builders for plant / controller / full loop.
    fn make_plant(d: u32, p: f64) -> QuantumPlant {
        QuantumPlant {
            code_distance: d,
            physical_error_rate: p,
            num_data_qubits: d * d,
            coherence_time_ns: 100_000,
        }
    }
    fn make_controller(lat: u64, tp: f64, acc: f64) -> ClassicalController {
        ClassicalController {
            decode_latency_ns: lat,
            decode_throughput: tp,
            accuracy: acc,
        }
    }
    fn make_loop(d: u32, p: f64, lat: u64) -> QecControlLoop {
        QecControlLoop {
            plant: make_plant(d, p),
            controller: make_controller(lat, 1e6, 0.99),
            state: ControlState::new(),
        }
    }
    #[test]
    fn test_control_state_new() {
        let s = ControlState::new();
        assert_eq!(s.logical_error_rate, 0.0);
        assert_eq!(s.error_backlog, 0.0);
        assert_eq!(s.rounds_decoded, 0);
        assert_eq!(s.total_latency_ns, 0);
    }
    #[test]
    fn test_control_state_default() {
        assert_eq!(ControlState::default().rounds_decoded, 0);
    }
    #[test]
    fn test_syndrome_period_scales() {
        assert!(syndrome_period_ns(3) < syndrome_period_ns(5));
        assert!(syndrome_period_ns(5) < syndrome_period_ns(7));
    }
    #[test]
    fn test_syndrome_period_d3() {
        // d=3: 6 layers * 3 * 20 ns = 360 ns.
        assert_eq!(syndrome_period_ns(3), 360);
    }
    #[test]
    fn test_stable_loop() {
        let c = analyze_stability(&make_loop(5, 0.001, 100));
        assert!(c.is_stable);
        assert!(c.margin > 0.0);
        assert!(c.convergence_rate > 0.0);
    }
    #[test]
    fn test_unstable_loop() {
        // 1000 ns decode latency exceeds the 360 ns d=3 syndrome period.
        let c = analyze_stability(&make_loop(3, 0.001, 1000));
        assert!(!c.is_stable);
        assert!(c.margin < 0.0);
    }
    #[test]
    fn test_stability_critical_latency() {
        assert_eq!(
            analyze_stability(&make_loop(5, 0.001, 100)).critical_latency_ns,
            syndrome_period_ns(5)
        );
    }
    #[test]
    fn test_stability_zero_decode() {
        let c = analyze_stability(&make_loop(3, 0.001, 0));
        assert!(c.is_stable);
        assert!(c.margin.is_infinite());
    }
    #[test]
    fn test_max_stable_fast() {
        assert!(max_stable_distance(&make_controller(100, 1e7, 0.99), 0.001) >= 3);
    }
    #[test]
    fn test_max_stable_slow() {
        assert!(max_stable_distance(&make_controller(10_000, 1e5, 0.99), 0.001) >= 3);
    }
    #[test]
    fn test_max_stable_above_thresh() {
        // Above the accuracy-scaled threshold the floor distance 3 is returned.
        assert_eq!(
            max_stable_distance(&make_controller(100, 1e7, 0.99), 0.5),
            3
        );
    }
    #[test]
    fn test_min_throughput_d3() {
        let tp = min_throughput(&make_plant(3, 0.001));
        assert!(tp > 2e6 && tp < 3e6);
    }
    #[test]
    fn test_min_throughput_ordering() {
        assert!(min_throughput(&make_plant(3, 0.001)) > min_throughput(&make_plant(5, 0.001)));
    }
    #[test]
    fn test_optimize_basic() {
        let b = ResourceBudget {
            total_physical_qubits: 10_000,
            classical_cores: 8,
            classical_clock_ghz: 3.0,
            total_time_budget_us: 1_000,
        };
        let a = optimize_allocation(&b, 0.001, 1);
        assert!(!a.is_empty());
        // Results must come back sorted by descending Pareto score.
        for w in a.windows(2) {
            assert!(w[0].pareto_score >= w[1].pareto_score);
        }
    }
    #[test]
    fn test_optimize_min_logical() {
        let b = ResourceBudget {
            total_physical_qubits: 100,
            classical_cores: 4,
            classical_clock_ghz: 2.0,
            total_time_budget_us: 1_000,
        };
        for a in &optimize_allocation(&b, 0.001, 5) {
            assert!(a.logical_qubits >= 5);
        }
    }
    #[test]
    fn test_optimize_insufficient() {
        // 5 physical qubits cannot host even one d=3 logical qubit.
        let b = ResourceBudget {
            total_physical_qubits: 5,
            classical_cores: 1,
            classical_clock_ghz: 1.0,
            total_time_budget_us: 100,
        };
        assert!(optimize_allocation(&b, 0.001, 1).is_empty());
    }
    #[test]
    fn test_optimize_zero_cores() {
        let b = ResourceBudget {
            total_physical_qubits: 10_000,
            classical_cores: 0,
            classical_clock_ghz: 0.0,
            total_time_budget_us: 1_000,
        };
        assert!(optimize_allocation(&b, 0.001, 1).is_empty());
    }
    #[test]
    fn test_latency_budget_d3() {
        let lb = plan_latency_budget(3, 100);
        assert_eq!(lb.syndrome_extraction_ns, 360);
        assert_eq!(lb.decode_ns, 100);
        assert_eq!(lb.correction_ns, 20);
        assert_eq!(lb.total_round_ns, 480);
        assert_eq!(lb.slack_ns, 240);
    }
    #[test]
    fn test_latency_budget_negative_slack() {
        assert!(plan_latency_budget(3, 1000).slack_ns < 0);
    }
    #[test]
    fn test_latency_budget_scales() {
        assert!(
            plan_latency_budget(7, 100).syndrome_extraction_ns
                > plan_latency_budget(3, 100).syndrome_extraction_ns
        );
    }
    #[test]
    fn test_sim_stable() {
        let t = simulate_control_loop(&make_loop(5, 0.001, 100), 100, 42);
        assert_eq!(t.rounds.len(), 100);
        assert!(t.converged);
        assert!(t.max_backlog < 50.0);
    }
    #[test]
    fn test_sim_unstable() {
        let t = simulate_control_loop(&make_loop(3, 0.3, 1000), 200, 42);
        assert_eq!(t.rounds.len(), 200);
        assert!(t.max_backlog > 0.0);
    }
    #[test]
    fn test_sim_zero_rounds() {
        let t = simulate_control_loop(&make_loop(3, 0.001, 100), 0, 42);
        assert!(t.rounds.is_empty());
        assert_eq!(t.final_logical_error_rate, 0.0);
        assert!(t.converged);
    }
    #[test]
    fn test_sim_deterministic() {
        // Same seed must reproduce the identical per-round trace.
        let t1 = simulate_control_loop(&make_loop(5, 0.01, 200), 50, 123);
        let t2 = simulate_control_loop(&make_loop(5, 0.01, 200), 50, 123);
        for (a, b) in t1.rounds.iter().zip(t2.rounds.iter()) {
            assert_eq!(a.errors_this_round, b.errors_this_round);
            assert_eq!(a.errors_corrected, b.errors_corrected);
        }
    }
    #[test]
    fn test_sim_zero_error_rate() {
        let t = simulate_control_loop(&make_loop(5, 0.0, 100), 50, 99);
        assert!(t.converged);
        assert_eq!(t.final_logical_error_rate, 0.0);
        for s in &t.rounds {
            assert_eq!(s.errors_this_round, 0);
        }
    }
    #[test]
    fn test_sim_snapshot_fields() {
        let t = simulate_control_loop(&make_loop(3, 0.01, 100), 10, 7);
        for (i, s) in t.rounds.iter().enumerate() {
            assert_eq!(s.round, i as u64);
            assert!(s.errors_corrected <= s.errors_this_round);
            assert!(s.decode_latency_ns > 0);
        }
    }
    #[test]
    fn test_scaling_uf() {
        let l = classical_overhead_scaling("union_find");
        assert_eq!(l.exponent, 1.0);
        assert!(l.name.contains("Union-Find"));
    }
    #[test]
    fn test_scaling_mwpm() {
        assert_eq!(classical_overhead_scaling("mwpm").exponent, 3.0);
    }
    #[test]
    fn test_scaling_neural() {
        let l = classical_overhead_scaling("neural");
        assert_eq!(l.exponent, 1.0);
        assert!(l.prefactor > 1.0);
    }
    #[test]
    fn test_scaling_unknown() {
        let l = classical_overhead_scaling("custom");
        assert_eq!(l.exponent, 2.0);
        assert!(l.name.contains("custom"));
    }
    #[test]
    fn test_logical_below() {
        let l = logical_error_scaling(0.001, 0.01);
        assert!(l.exponent > 0.0);
        assert_eq!(l.prefactor, 0.1);
    }
    #[test]
    fn test_logical_above() {
        let l = logical_error_scaling(0.05, 0.01);
        assert_eq!(l.exponent, 0.0);
        assert_eq!(l.prefactor, 1.0);
    }
    #[test]
    fn test_logical_at() {
        assert_eq!(logical_error_scaling(0.01, 0.01).exponent, 0.0);
    }
    #[test]
    fn test_logical_zero_rate() {
        assert_eq!(logical_error_scaling(0.0, 0.01).exponent, 0.0);
    }
    #[test]
    fn test_logical_zero_thresh() {
        assert_eq!(logical_error_scaling(0.001, 0.0).exponent, 0.0);
    }
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,26 @@
//! Error types for the ruQu quantum simulation engine
use crate::types::QubitIndex;
use thiserror::Error;
/// Errors that can occur during quantum simulation
#[derive(Error, Debug)]
pub enum QuantumError {
    /// More qubits were requested than the backend supports.
    #[error("qubit limit exceeded: requested {requested}, maximum {maximum}")]
    QubitLimitExceeded { requested: u32, maximum: u32 },
    /// An operation referenced a qubit index outside the register.
    #[error("invalid qubit index {index} for {num_qubits}-qubit system")]
    InvalidQubitIndex { index: QubitIndex, num_qubits: u32 },
    /// The requested state storage could not be allocated.
    #[error("memory allocation failed: need {required_bytes} bytes")]
    MemoryAllocationFailed { required_bytes: usize },
    /// A supplied amplitude vector's length is not 2^num_qubits.
    #[error("invalid state vector: length {length} does not match 2^{num_qubits}")]
    InvalidStateVector { length: usize, num_qubits: u32 },
    /// Catch-all for circuit construction or validation failures.
    #[error("circuit error: {0}")]
    CircuitError(String),
}
/// Convenience alias used throughout the crate for operations that can
/// fail with a [`QuantumError`].
pub type Result<T> = std::result::Result<T, QuantumError>;

View File

@@ -0,0 +1,200 @@
//! Quantum gate definitions and matrix representations
use crate::types::{Complex, QubitIndex};
use std::f64::consts::FRAC_1_SQRT_2;
/// Quantum gate operations
#[derive(Debug, Clone)]
pub enum Gate {
    // ----- Single-qubit gates -----
    /// Hadamard: (1/sqrt2) [[1, 1], [1, -1]].
    H(QubitIndex),
    /// Pauli-X (bit flip).
    X(QubitIndex),
    /// Pauli-Y.
    Y(QubitIndex),
    /// Pauli-Z (phase flip).
    Z(QubitIndex),
    /// Phase gate S = diag(1, i) (square root of Z).
    S(QubitIndex),
    /// Inverse of S: diag(1, -i).
    Sdg(QubitIndex),
    /// T gate: diag(1, e^(i*pi/4)).
    T(QubitIndex),
    /// Inverse of T: diag(1, e^(-i*pi/4)).
    Tdg(QubitIndex),
    /// Rotation about the X axis by the given angle (radians).
    Rx(QubitIndex, f64),
    /// Rotation about the Y axis by the given angle (radians).
    Ry(QubitIndex, f64),
    /// Rotation about the Z axis by the given angle (radians).
    Rz(QubitIndex, f64),
    /// Phase rotation by the given angle (radians).
    Phase(QubitIndex, f64),
    // ----- Two-qubit gates -----
    /// Controlled-NOT on the two given qubits.
    CNOT(QubitIndex, QubitIndex),
    /// Controlled-Z on the two given qubits.
    CZ(QubitIndex, QubitIndex),
    /// Exchange the states of the two given qubits.
    SWAP(QubitIndex, QubitIndex),
    /// Two-qubit ZZ interaction rotation by the given angle (radians).
    Rzz(QubitIndex, QubitIndex, f64),
    // ----- Special operations -----
    /// Measure a single qubit (non-unitary).
    Measure(QubitIndex),
    /// Reset a single qubit (non-unitary).
    Reset(QubitIndex),
    /// Scheduling barrier; acts on no qubits.
    Barrier,
    // ----- Fused / custom single-qubit unitary (produced by optimizer) -----
    /// Arbitrary 2x2 single-qubit unitary matrix.
    Unitary1Q(QubitIndex, [[Complex; 2]; 2]),
}
impl Gate {
    /// List the qubit indices this gate touches (empty for `Barrier`).
    pub fn qubits(&self) -> Vec<QubitIndex> {
        match self {
            Gate::H(q)
            | Gate::X(q)
            | Gate::Y(q)
            | Gate::Z(q)
            | Gate::S(q)
            | Gate::Sdg(q)
            | Gate::T(q)
            | Gate::Tdg(q)
            | Gate::Rx(q, _)
            | Gate::Ry(q, _)
            | Gate::Rz(q, _)
            | Gate::Phase(q, _)
            | Gate::Measure(q)
            | Gate::Reset(q)
            | Gate::Unitary1Q(q, _) => vec![*q],
            Gate::CNOT(a, b) | Gate::CZ(a, b) | Gate::SWAP(a, b) | Gate::Rzz(a, b, _) => {
                vec![*a, *b]
            }
            Gate::Barrier => Vec::new(),
        }
    }
    /// Returns `true` for non-unitary operations (measurement, reset, barrier).
    pub fn is_non_unitary(&self) -> bool {
        matches!(self, Gate::Measure(_) | Gate::Reset(_) | Gate::Barrier)
    }
    /// Return the 2x2 unitary matrix for single-qubit gates; `None` otherwise.
    pub fn matrix_1q(&self) -> Option<[[Complex; 2]; 2]> {
        let zero = Complex::ZERO;
        let one = Complex::ONE;
        let i = Complex::I;
        let m = match self {
            // H = (1/sqrt2) [[1, 1], [1, -1]]
            Gate::H(_) => {
                let h = Complex::new(FRAC_1_SQRT_2, 0.0);
                [[h, h], [h, -h]]
            }
            // X = [[0, 1], [1, 0]]
            Gate::X(_) => [[zero, one], [one, zero]],
            // Y = [[0, -i], [i, 0]]
            Gate::Y(_) => [[zero, -i], [i, zero]],
            // Z = [[1, 0], [0, -1]]
            Gate::Z(_) => [[one, zero], [zero, -one]],
            // S = [[1, 0], [0, i]]
            Gate::S(_) => [[one, zero], [zero, i]],
            // Sdg = [[1, 0], [0, -i]]
            Gate::Sdg(_) => [[one, zero], [zero, -i]],
            // T = [[1, 0], [0, e^(i*pi/4)]]  (e^(i*pi/4) = (1 + i)/sqrt2)
            Gate::T(_) => [
                [one, zero],
                [zero, Complex::new(FRAC_1_SQRT_2, FRAC_1_SQRT_2)],
            ],
            // Tdg = [[1, 0], [0, e^(-i*pi/4)]]
            Gate::Tdg(_) => [
                [one, zero],
                [zero, Complex::new(FRAC_1_SQRT_2, -FRAC_1_SQRT_2)],
            ],
            // Rx(theta) = [[cos(t/2), -i*sin(t/2)], [-i*sin(t/2), cos(t/2)]]
            Gate::Rx(_, theta) => {
                let half = *theta / 2.0;
                let c = Complex::new(half.cos(), 0.0);
                let s = Complex::new(0.0, -half.sin());
                [[c, s], [s, c]]
            }
            // Ry(theta) = [[cos(t/2), -sin(t/2)], [sin(t/2), cos(t/2)]]
            Gate::Ry(_, theta) => {
                let half = *theta / 2.0;
                let cos_h = half.cos();
                let sin_h = half.sin();
                [
                    [Complex::new(cos_h, 0.0), Complex::new(-sin_h, 0.0)],
                    [Complex::new(sin_h, 0.0), Complex::new(cos_h, 0.0)],
                ]
            }
            // Rz(theta) = [[e^(-i*t/2), 0], [0, e^(i*t/2)]]
            Gate::Rz(_, theta) => {
                let half = *theta / 2.0;
                [
                    [Complex::from_polar(1.0, -half), zero],
                    [zero, Complex::from_polar(1.0, half)],
                ]
            }
            // Phase(theta) = [[1, 0], [0, e^(i*theta)]]
            Gate::Phase(_, theta) => [[one, zero], [zero, Complex::from_polar(1.0, *theta)]],
            // Custom fused unitary: stored matrix is returned verbatim.
            Gate::Unitary1Q(_, u) => *u,
            // Two-qubit gates and non-unitary operations have no 1Q matrix.
            _ => return None,
        };
        Some(m)
    }
    /// Return the 4x4 unitary matrix for two-qubit gates; `None` otherwise.
    ///
    /// Row / column ordering: index = q1_bit * 2 + q2_bit
    /// where q1 is the first qubit argument and q2 the second.
    pub fn matrix_2q(&self) -> Option<[[Complex; 4]; 4]> {
        let z = Complex::ZERO;
        let o = Complex::ONE;
        let m = match self {
            // CNOT(control, target): |c,t> -> |c, t XOR c>
            // Rows: |00>, |01>, |10>, |11> (control, target)
            Gate::CNOT(_, _) => [
                [o, z, z, z],
                [z, o, z, z],
                [z, z, z, o],
                [z, z, o, z],
            ],
            // CZ: diag(1, 1, 1, -1)
            Gate::CZ(_, _) => [
                [o, z, z, z],
                [z, o, z, z],
                [z, z, o, z],
                [z, z, z, -o],
            ],
            // SWAP: identity with rows 1 and 2 exchanged
            Gate::SWAP(_, _) => [
                [o, z, z, z],
                [z, z, o, z],
                [z, o, z, z],
                [z, z, z, o],
            ],
            // Rzz(theta): diag(e^{-it/2}, e^{it/2}, e^{it/2}, e^{-it/2})
            Gate::Rzz(_, _, theta) => {
                let half = *theta / 2.0;
                let minus = Complex::from_polar(1.0, -half);
                let plus = Complex::from_polar(1.0, half);
                [
                    [minus, z, z, z],
                    [z, plus, z, z],
                    [z, z, plus, z],
                    [z, z, z, minus],
                ]
            }
            _ => return None,
        };
        Some(m)
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,72 @@
//! # ruqu-core -- Quantum Execution Intelligence Engine
//!
//! Pure Rust quantum simulation and execution engine for the ruVector stack.
//! Supports state-vector (up to 32 qubits), stabilizer (millions), Clifford+T
//! (moderate T-count), and tensor network backends with automatic routing,
//! noise modeling, error mitigation, and cryptographic witness logging.
//!
//! ## Quick Start
//!
//! ```
//! use ruqu_core::prelude::*;
//!
//! // Create a Bell state |00> + |11> (unnormalised notation)
//! let mut circuit = QuantumCircuit::new(2);
//! circuit.h(0).cnot(0, 1);
//! let result = Simulator::run(&circuit).unwrap();
//! let probs = result.state.probabilities();
//! // probs ~= [0.5, 0.0, 0.0, 0.5]
//! ```
// -- Core simulation layer --
pub mod backend;
pub mod circuit;
pub mod circuit_analyzer;
pub mod error;
pub mod gate;
pub mod mixed_precision;
pub mod optimizer;
pub mod simd;
pub mod simulator;
pub mod stabilizer;
pub mod state;
pub mod tensor_network;
pub mod types;
// -- Scientific instrument layer (ADR-QE-015) --
pub mod confidence;
pub mod hardware;
pub mod mitigation;
pub mod noise;
pub mod qasm;
pub mod replay;
pub mod transpiler;
pub mod verification;
pub mod witness;
// -- SOTA differentiation layer --
pub mod clifford_t;
pub mod decomposition;
pub mod pipeline;
pub mod planner;
// -- QEC control plane --
pub mod control_theory;
pub mod decoder;
pub mod qec_scheduler;
pub mod subpoly_decoder;
// -- Benchmark & proof suite --
pub mod benchmark;
/// Re-exports of the most commonly used items.
///
/// Intended for glob-import (`use ruqu_core::prelude::*;`): brings the
/// circuit builder, simulator entry points, gate definitions, and the
/// crate's error/result types into scope in one line.
pub mod prelude {
    pub use crate::backend::BackendType;
    pub use crate::circuit::QuantumCircuit;
    pub use crate::error::{QuantumError, Result};
    pub use crate::gate::Gate;
    pub use crate::qasm::to_qasm3;
    pub use crate::simulator::{ShotResult, SimConfig, SimulationResult, Simulator};
    pub use crate::state::QuantumState;
    pub use crate::types::*;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,746 @@
//! Mixed-precision (f32) quantum state vector.
//!
//! Provides a float32 complex type and state vector that uses half the memory
//! of the standard f64 state, enabling simulation of approximately one
//! additional qubit at each memory threshold.
//!
//! | Qubits | f64 memory | f32 memory |
//! |--------|-----------|-----------|
//! | 25 | 512 MiB | 256 MiB |
//! | 30 | 16 GiB | 8 GiB |
//! | 32 | 64 GiB | 32 GiB |
//! | 33 | 128 GiB | 64 GiB |
use crate::error::{QuantumError, Result};
use crate::gate::Gate;
use crate::types::{Complex, MeasurementOutcome, QubitIndex};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use std::fmt;
use std::ops::{Add, AddAssign, Mul, Neg, Sub};
// ---------------------------------------------------------------------------
// Complex32
// ---------------------------------------------------------------------------
/// Complex number using f32 precision (8 bytes vs 16 bytes for f64).
///
/// This is the building block for `QuantumStateF32`. Each amplitude occupies
/// half the memory of the standard `Complex` (f64) type, doubling the number
/// of amplitudes that fit in a given memory budget and thus enabling roughly
/// one additional qubit of simulation capacity.
///
/// Note: `PartialEq` is derived, so equality is exact f32 comparison;
/// prefer an epsilon-based check when comparing computed amplitudes.
#[derive(Clone, Copy, PartialEq)]
pub struct Complex32 {
    /// Real component.
    pub re: f32,
    /// Imaginary component.
    pub im: f32,
}
impl Complex32 {
    /// Additive identity, 0 + 0i.
    pub const ZERO: Self = Self { re: 0.0, im: 0.0 };
    /// Multiplicative identity, 1 + 0i.
    pub const ONE: Self = Self { re: 1.0, im: 0.0 };
    /// Imaginary unit, 0 + 1i.
    pub const I: Self = Self { re: 0.0, im: 1.0 };
    /// Build a complex number from its real and imaginary parts.
    #[inline]
    pub fn new(re: f32, im: f32) -> Self {
        Self { re, im }
    }
    /// Squared magnitude |z|^2 = re^2 + im^2 (no square root taken).
    #[inline]
    pub fn norm_sq(&self) -> f32 {
        let Self { re, im } = *self;
        re * re + im * im
    }
    /// Magnitude |z| = sqrt(re^2 + im^2).
    #[inline]
    pub fn norm(&self) -> f32 {
        self.norm_sq().sqrt()
    }
    /// Complex conjugate: negates the imaginary component.
    #[inline]
    pub fn conj(&self) -> Self {
        Self::new(self.re, -self.im)
    }
    /// Narrow an f64 `Complex` to f32 precision (components truncated).
    #[inline]
    pub fn from_f64(c: &Complex) -> Self {
        Self::new(c.re as f32, c.im as f32)
    }
    /// Widen to the f64 `Complex` type (lossless).
    #[inline]
    pub fn to_f64(&self) -> Complex {
        Complex {
            re: f64::from(self.re),
            im: f64::from(self.im),
        }
    }
}
// ---------------------------------------------------------------------------
// Arithmetic trait implementations for Complex32
// ---------------------------------------------------------------------------
impl Add for Complex32 {
    type Output = Self;
    /// Componentwise addition.
    #[inline]
    fn add(self, other: Self) -> Self {
        Self::new(self.re + other.re, self.im + other.im)
    }
}
impl Sub for Complex32 {
    type Output = Self;
    /// Componentwise subtraction.
    #[inline]
    fn sub(self, other: Self) -> Self {
        Self::new(self.re - other.re, self.im - other.im)
    }
}
impl Mul for Complex32 {
    type Output = Self;
    /// Complex product: (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
    #[inline]
    fn mul(self, other: Self) -> Self {
        Self::new(
            self.re * other.re - self.im * other.im,
            self.re * other.im + self.im * other.re,
        )
    }
}
impl Neg for Complex32 {
    type Output = Self;
    /// Negate both components.
    #[inline]
    fn neg(self) -> Self {
        Self::new(-self.re, -self.im)
    }
}
impl AddAssign for Complex32 {
    /// In-place componentwise addition.
    #[inline]
    fn add_assign(&mut self, other: Self) {
        self.re += other.re;
        self.im += other.im;
    }
}
impl Mul<f32> for Complex32 {
    type Output = Self;
    /// Scale both components by a real factor.
    #[inline]
    fn mul(self, scale: f32) -> Self {
        Self::new(self.re * scale, self.im * scale)
    }
}
impl fmt::Debug for Complex32 {
    /// Tuple-like rendering: `(re, im)`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "({}, {})", self.re, self.im)
    }
}
impl fmt::Display for Complex32 {
    /// Algebraic rendering, e.g. `1.5+2i` or `1.5-2i`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // A negative imaginary part already prints its own '-' sign, so
        // only a non-negative one needs an explicit '+' separator.
        let sep = if self.im >= 0.0 { "+" } else { "" };
        write!(f, "{}{}{}i", self.re, sep, self.im)
    }
}
// ---------------------------------------------------------------------------
// QuantumStateF32
// ---------------------------------------------------------------------------
/// Maximum qubits for f32 state vector (1 more than f64 due to halved memory).
pub const MAX_QUBITS_F32: u32 = 33;
/// Quantum state using f32 precision for reduced memory usage.
///
/// Uses 8 bytes per amplitude instead of 16, enabling simulation of
/// approximately one additional qubit at each memory boundary. This is
/// intended for warm/exploratory runs; final verification can upcast to
/// the full `QuantumState` (f64) via [`QuantumStateF32::to_f64`].
pub struct QuantumStateF32 {
    /// Dense amplitude vector of length 2^num_qubits.
    amplitudes: Vec<Complex32>,
    /// Number of qubits represented by this state.
    num_qubits: u32,
    /// Randomness source for measurement sampling (seedable for reproducibility).
    rng: StdRng,
    /// Chronological log of every projective measurement performed.
    measurement_record: Vec<MeasurementOutcome>,
    /// Running count of gate applications, used for error bound estimation.
    gate_count: u64,
}
// ---------------------------------------------------------------------------
// Construction
// ---------------------------------------------------------------------------
impl QuantumStateF32 {
    // -------------------------------------------------------------------
    // Construction
    // -------------------------------------------------------------------
    /// Shared constructor core: validate the qubit count and allocate the
    /// |00...0> amplitude vector (amplitude 0 set to 1, all others 0).
    ///
    /// Factored out so `new` and `new_with_seed` do not duplicate the
    /// validation and initialisation logic.
    ///
    /// # Errors
    ///
    /// * `CircuitError` when `num_qubits == 0`.
    /// * `QubitLimitExceeded` when `num_qubits > MAX_QUBITS_F32`.
    fn zero_state_amplitudes(num_qubits: u32) -> Result<Vec<Complex32>> {
        if num_qubits == 0 {
            return Err(QuantumError::CircuitError(
                "cannot create quantum state with 0 qubits".into(),
            ));
        }
        if num_qubits > MAX_QUBITS_F32 {
            return Err(QuantumError::QubitLimitExceeded {
                requested: num_qubits,
                maximum: MAX_QUBITS_F32,
            });
        }
        let n = 1usize << num_qubits;
        let mut amplitudes = vec![Complex32::ZERO; n];
        amplitudes[0] = Complex32::ONE;
        Ok(amplitudes)
    }
    /// Create the |00...0> state for `num_qubits` qubits using f32 precision.
    ///
    /// The RNG is seeded from OS entropy; use
    /// [`QuantumStateF32::new_with_seed`] when reproducible measurement
    /// outcomes are required.
    pub fn new(num_qubits: u32) -> Result<Self> {
        Ok(Self {
            amplitudes: Self::zero_state_amplitudes(num_qubits)?,
            num_qubits,
            rng: StdRng::from_entropy(),
            measurement_record: Vec::new(),
            gate_count: 0,
        })
    }
    /// Create the |00...0> state with a deterministic seed for reproducibility.
    pub fn new_with_seed(num_qubits: u32, seed: u64) -> Result<Self> {
        Ok(Self {
            amplitudes: Self::zero_state_amplitudes(num_qubits)?,
            num_qubits,
            rng: StdRng::seed_from_u64(seed),
            measurement_record: Vec::new(),
            gate_count: 0,
        })
    }
    /// Downcast from an f64 `QuantumState`, narrowing each amplitude to f32.
    ///
    /// The measurement record is cloned from the source state. The gate
    /// counter restarts at zero, so `precision_error_bound` only reflects
    /// gates applied *after* the downcast.
    pub fn from_f64(state: &crate::state::QuantumState) -> Self {
        let amplitudes: Vec<Complex32> = state
            .state_vector()
            .iter()
            .map(Complex32::from_f64)
            .collect();
        Self {
            num_qubits: state.num_qubits(),
            amplitudes,
            rng: StdRng::from_entropy(),
            measurement_record: state.measurement_record().to_vec(),
            gate_count: 0,
        }
    }
    /// Upcast to an f64 `QuantumState` for high-precision verification.
    ///
    /// Each f32 amplitude is widened to f64. The measurement record is
    /// **not** transferred since the f64 state is typically used for fresh
    /// verification runs.
    pub fn to_f64(&self) -> Result<crate::state::QuantumState> {
        let amps: Vec<Complex> = self.amplitudes.iter().map(|c| c.to_f64()).collect();
        crate::state::QuantumState::from_amplitudes(amps, self.num_qubits)
    }
    // -------------------------------------------------------------------
    // Accessors
    // -------------------------------------------------------------------
    /// Number of qubits in this state.
    pub fn num_qubits(&self) -> u32 {
        self.num_qubits
    }
    /// Number of amplitudes (2^num_qubits).
    pub fn num_amplitudes(&self) -> usize {
        self.amplitudes.len()
    }
    /// Compute |amplitude|^2 for each basis state.
    ///
    /// Probabilities are returned as f64 for downstream accuracy: the f32
    /// norm-squared values are widened before being returned.
    pub fn probabilities(&self) -> Vec<f64> {
        self.amplitudes.iter().map(|a| a.norm_sq() as f64).collect()
    }
    /// Estimated memory in bytes for an f32 state of `num_qubits` qubits.
    ///
    /// Each amplitude is 8 bytes (two f32 values).
    pub fn estimate_memory(num_qubits: u32) -> usize {
        (1usize << num_qubits) * std::mem::size_of::<Complex32>()
    }
    /// Returns the record of measurements performed on this state.
    pub fn measurement_record(&self) -> &[MeasurementOutcome] {
        &self.measurement_record
    }
    /// Rough upper-bound estimate of accumulated floating-point error from
    /// using f32 instead of f64.
    ///
    /// Each gate application introduces approximately `f32::EPSILON` (~1.2e-7)
    /// of relative error per amplitude. Over `g` gates this compounds to
    /// roughly `g * eps`. This is a conservative, heuristic bound.
    pub fn precision_error_bound(&self) -> f64 {
        (self.gate_count as f64) * (f32::EPSILON as f64)
    }
    // -------------------------------------------------------------------
    // Gate dispatch
    // -------------------------------------------------------------------
    /// Apply a gate to the state, returning any measurement outcomes.
    ///
    /// The gate's f64 matrices are converted to f32 before application.
    ///
    /// # Errors
    ///
    /// * `InvalidQubitIndex` if any target qubit is out of range.
    /// * `CircuitError` for a two-qubit gate with identical qubits, or a
    ///   gate variant with no single-qubit matrix.
    pub fn apply_gate(&mut self, gate: &Gate) -> Result<Vec<MeasurementOutcome>> {
        // Validate qubit indices before touching the amplitudes.
        for &q in gate.qubits().iter() {
            self.validate_qubit(q)?;
        }
        match gate {
            // Barrier is a no-op on the state.
            Gate::Barrier => Ok(vec![]),
            Gate::Measure(q) => {
                let outcome = self.measure(*q)?;
                Ok(vec![outcome])
            }
            Gate::Reset(q) => {
                self.reset_qubit(*q)?;
                Ok(vec![])
            }
            // Two-qubit gates
            Gate::CNOT(q1, q2) | Gate::CZ(q1, q2) | Gate::SWAP(q1, q2) | Gate::Rzz(q1, q2, _) => {
                if q1 == q2 {
                    return Err(QuantumError::CircuitError(format!(
                        "two-qubit gate requires distinct qubits, got {} and {}",
                        q1, q2
                    )));
                }
                // unwrap is safe: every variant in this arm has a 4x4 matrix.
                let matrix_f64 = gate.matrix_2q().unwrap();
                let matrix = convert_matrix_2q(&matrix_f64);
                self.apply_two_qubit_gate(*q1, *q2, &matrix);
                self.gate_count += 1;
                Ok(vec![])
            }
            // Everything else must be a single-qubit unitary.
            other => {
                if let Some(matrix_f64) = other.matrix_1q() {
                    let q = other.qubits()[0];
                    let matrix = convert_matrix_1q(&matrix_f64);
                    self.apply_single_qubit_gate(q, &matrix);
                    self.gate_count += 1;
                    Ok(vec![])
                } else {
                    Err(QuantumError::CircuitError(format!(
                        "unsupported gate: {:?}",
                        other
                    )))
                }
            }
        }
    }
    // -------------------------------------------------------------------
    // Single-qubit gate kernel
    // -------------------------------------------------------------------
    /// Apply a 2x2 unitary matrix to the given qubit.
    ///
    /// For each pair of amplitudes where the qubit bit is 0 (index `i`)
    /// versus 1 (index `j = i + step`), the matrix transformation is applied.
    pub fn apply_single_qubit_gate(&mut self, qubit: QubitIndex, matrix: &[[Complex32; 2]; 2]) {
        let step = 1usize << qubit;
        let n = self.amplitudes.len();
        let mut block_start = 0;
        while block_start < n {
            for i in block_start..block_start + step {
                let j = i + step;
                let a = self.amplitudes[i]; // qubit = 0
                let b = self.amplitudes[j]; // qubit = 1
                self.amplitudes[i] = matrix[0][0] * a + matrix[0][1] * b;
                self.amplitudes[j] = matrix[1][0] * a + matrix[1][1] * b;
            }
            block_start += step << 1;
        }
    }
    // -------------------------------------------------------------------
    // Two-qubit gate kernel
    // -------------------------------------------------------------------
    /// Apply a 4x4 unitary matrix to qubits `q1` and `q2`.
    ///
    /// Matrix row/column index = q1_bit * 2 + q2_bit.
    pub fn apply_two_qubit_gate(
        &mut self,
        q1: QubitIndex,
        q2: QubitIndex,
        matrix: &[[Complex32; 4]; 4],
    ) {
        let q1_bit = 1usize << q1;
        let q2_bit = 1usize << q2;
        let n = self.amplitudes.len();
        for base in 0..n {
            // Process each group of 4 amplitudes exactly once: when both
            // target bits in the index are zero.
            if base & q1_bit != 0 || base & q2_bit != 0 {
                continue;
            }
            let idxs = [
                base,                   // q1=0, q2=0
                base | q2_bit,          // q1=0, q2=1
                base | q1_bit,          // q1=1, q2=0
                base | q1_bit | q2_bit, // q1=1, q2=1
            ];
            let vals = [
                self.amplitudes[idxs[0]],
                self.amplitudes[idxs[1]],
                self.amplitudes[idxs[2]],
                self.amplitudes[idxs[3]],
            ];
            for r in 0..4 {
                self.amplitudes[idxs[r]] = matrix[r][0] * vals[0]
                    + matrix[r][1] * vals[1]
                    + matrix[r][2] * vals[2]
                    + matrix[r][3] * vals[3];
            }
        }
    }
    // -------------------------------------------------------------------
    // Measurement
    // -------------------------------------------------------------------
    /// Measure a single qubit projectively.
    ///
    /// 1. Compute P(qubit = 0) using f32 arithmetic.
    /// 2. Sample the outcome.
    /// 3. Collapse the state vector (zero out the other branch).
    /// 4. Renormalise.
    ///
    /// The probability stored in the returned `MeasurementOutcome` is widened
    /// to f64 for compatibility with the rest of the engine.
    pub fn measure(&mut self, qubit: QubitIndex) -> Result<MeasurementOutcome> {
        self.validate_qubit(qubit)?;
        let qubit_bit = 1usize << qubit;
        let n = self.amplitudes.len();
        // Probability of measuring |0> (accumulated in f32).
        let mut p0: f32 = 0.0;
        for i in 0..n {
            if i & qubit_bit == 0 {
                p0 += self.amplitudes[i].norm_sq();
            }
        }
        let random: f64 = self.rng.gen();
        let result = random >= p0 as f64; // true => measured |1>
        let prob_f32 = if result { 1.0_f32 - p0 } else { p0 };
        // Guard against division by zero (degenerate state).
        let norm_factor = if prob_f32 > 0.0 {
            1.0_f32 / prob_f32.sqrt()
        } else {
            0.0_f32
        };
        // Collapse + renormalise: keep the measured branch, zero the other.
        for i in 0..n {
            let bit_is_one = i & qubit_bit != 0;
            if bit_is_one == result {
                self.amplitudes[i] = self.amplitudes[i] * norm_factor;
            } else {
                self.amplitudes[i] = Complex32::ZERO;
            }
        }
        let outcome = MeasurementOutcome {
            qubit,
            result,
            probability: prob_f32 as f64,
        };
        self.measurement_record.push(outcome.clone());
        Ok(outcome)
    }
    // -------------------------------------------------------------------
    // Reset
    // -------------------------------------------------------------------
    /// Reset a qubit to |0>.
    ///
    /// Implemented as "measure, then flip if result was |1>".
    fn reset_qubit(&mut self, qubit: QubitIndex) -> Result<()> {
        let outcome = self.measure(qubit)?;
        if outcome.result {
            // Qubit collapsed to |1>; apply X to bring it back to |0>.
            let x_matrix_f64 = Gate::X(qubit).matrix_1q().unwrap();
            let x_matrix = convert_matrix_1q(&x_matrix_f64);
            self.apply_single_qubit_gate(qubit, &x_matrix);
        }
        Ok(())
    }
    // -------------------------------------------------------------------
    // Internal helpers
    // -------------------------------------------------------------------
    /// Validate that a qubit index is within range.
    fn validate_qubit(&self, qubit: QubitIndex) -> Result<()> {
        if qubit >= self.num_qubits {
            return Err(QuantumError::InvalidQubitIndex {
                index: qubit,
                num_qubits: self.num_qubits,
            });
        }
        Ok(())
    }
}
// ---------------------------------------------------------------------------
// Matrix conversion helpers (f64 -> f32)
// ---------------------------------------------------------------------------
/// Convert a 2x2 f64 gate matrix to its f32 counterpart, element by element.
fn convert_matrix_1q(m: &[[Complex; 2]; 2]) -> [[Complex32; 2]; 2] {
    let mut out = [[Complex32::ZERO; 2]; 2];
    for (r, row) in m.iter().enumerate() {
        for (c, val) in row.iter().enumerate() {
            out[r][c] = Complex32::from_f64(val);
        }
    }
    out
}
/// Convert a 4x4 f64 gate matrix to its f32 counterpart, element by element.
fn convert_matrix_2q(m: &[[Complex; 4]; 4]) -> [[Complex32; 4]; 4] {
    let mut out = [[Complex32::ZERO; 4]; 4];
    for (r, row) in m.iter().enumerate() {
        for (c, val) in row.iter().enumerate() {
            out[r][c] = Complex32::from_f64(val);
        }
    }
    out
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    /// Absolute tolerance for f32 comparisons in these tests.
    const EPS: f32 = 1e-6;
    /// Absolute-difference comparison against `EPS`.
    fn approx_eq_f32(a: f32, b: f32) -> bool {
        (a - b).abs() < EPS
    }
    #[test]
    fn complex32_arithmetic() {
        let a = Complex32::new(1.0, 2.0);
        let b = Complex32::new(3.0, -1.0);
        let sum = a + b;
        assert!(approx_eq_f32(sum.re, 4.0));
        assert!(approx_eq_f32(sum.im, 1.0));
        let diff = a - b;
        assert!(approx_eq_f32(diff.re, -2.0));
        assert!(approx_eq_f32(diff.im, 3.0));
        // (1+2i)*(3-i) = 3 - i + 6i - 2i^2 = 3 + 5i + 2 = 5 + 5i
        let prod = a * b;
        assert!(approx_eq_f32(prod.re, 5.0));
        assert!(approx_eq_f32(prod.im, 5.0));
        let neg = -a;
        assert!(approx_eq_f32(neg.re, -1.0));
        assert!(approx_eq_f32(neg.im, -2.0));
        assert!(approx_eq_f32(a.norm_sq(), 5.0));
        assert!(approx_eq_f32(a.conj().im, -2.0));
    }
    #[test]
    fn complex32_f64_conversion() {
        // 1.5 and -2.5 are exactly representable in f32, so the round trip
        // should be loss-free up to the test tolerance.
        let c64 = Complex::new(1.5, -2.5);
        let c32 = Complex32::from_f64(&c64);
        assert!(approx_eq_f32(c32.re, 1.5));
        assert!(approx_eq_f32(c32.im, -2.5));
        let back = c32.to_f64();
        assert!((back.re - 1.5).abs() < 1e-6);
        assert!((back.im - (-2.5)).abs() < 1e-6);
    }
    #[test]
    fn state_f32_creation() {
        // Fresh state must be exactly |000>: all mass on amplitude 0.
        let state = QuantumStateF32::new(3).unwrap();
        assert_eq!(state.num_qubits(), 3);
        assert_eq!(state.num_amplitudes(), 8);
        let probs = state.probabilities();
        assert!((probs[0] - 1.0).abs() < 1e-6);
        for &p in &probs[1..] {
            assert!(p.abs() < 1e-6);
        }
    }
    #[test]
    fn state_f32_zero_qubits_error() {
        assert!(QuantumStateF32::new(0).is_err());
    }
    #[test]
    fn state_f32_memory_estimate() {
        // 3 qubits -> 8 amplitudes * 8 bytes = 64 bytes
        assert_eq!(QuantumStateF32::estimate_memory(3), 64);
        // 10 qubits -> 1024 amplitudes * 8 bytes = 8192 bytes
        assert_eq!(QuantumStateF32::estimate_memory(10), 8192);
    }
    #[test]
    fn state_f32_h_gate() {
        // H|0> = (|0> + |1>)/sqrt2: equal probabilities.
        let mut state = QuantumStateF32::new_with_seed(1, 42).unwrap();
        state.apply_gate(&Gate::H(0)).unwrap();
        let probs = state.probabilities();
        assert!((probs[0] - 0.5).abs() < 1e-5);
        assert!((probs[1] - 0.5).abs() < 1e-5);
    }
    #[test]
    fn state_f32_bell_state() {
        let mut state = QuantumStateF32::new_with_seed(2, 42).unwrap();
        state.apply_gate(&Gate::H(0)).unwrap();
        state.apply_gate(&Gate::CNOT(0, 1)).unwrap();
        let probs = state.probabilities();
        // Bell state: |00> + |11>, each with probability 0.5
        assert!((probs[0] - 0.5).abs() < 1e-5);
        assert!(probs[1].abs() < 1e-5);
        assert!(probs[2].abs() < 1e-5);
        assert!((probs[3] - 0.5).abs() < 1e-5);
    }
    #[test]
    fn state_f32_measurement() {
        // After X, the qubit is exactly |1>, so the outcome is deterministic.
        let mut state = QuantumStateF32::new_with_seed(1, 42).unwrap();
        state.apply_gate(&Gate::X(0)).unwrap();
        let outcome = state.measure(0).unwrap();
        assert!(outcome.result); // Must be |1> with certainty
        assert!((outcome.probability - 1.0).abs() < 1e-5);
        assert_eq!(state.measurement_record().len(), 1);
    }
    #[test]
    fn state_f32_from_f64_roundtrip() {
        let f64_state = crate::state::QuantumState::new_with_seed(3, 99).unwrap();
        let f32_state = QuantumStateF32::from_f64(&f64_state);
        assert_eq!(f32_state.num_qubits(), 3);
        assert_eq!(f32_state.num_amplitudes(), 8);
        // Upcast back and check probabilities are close.
        let back = f32_state.to_f64().unwrap();
        let p_orig = f64_state.probabilities();
        let p_back = back.probabilities();
        for (a, b) in p_orig.iter().zip(p_back.iter()) {
            assert!((a - b).abs() < 1e-6);
        }
    }
    #[test]
    fn state_f32_precision_error_bound() {
        // Bound is gate_count * f32::EPSILON, so it starts at exactly 0.
        let mut state = QuantumStateF32::new_with_seed(2, 42).unwrap();
        assert_eq!(state.precision_error_bound(), 0.0);
        state.apply_gate(&Gate::H(0)).unwrap();
        state.apply_gate(&Gate::CNOT(0, 1)).unwrap();
        // 2 gates applied
        let bound = state.precision_error_bound();
        assert!(bound > 0.0);
        assert!(bound < 1e-5); // Should be very small for 2 gates
    }
    #[test]
    fn state_f32_invalid_qubit() {
        let mut state = QuantumStateF32::new(2).unwrap();
        assert!(state.apply_gate(&Gate::H(5)).is_err());
    }
    #[test]
    fn state_f32_distinct_qubits_check() {
        let mut state = QuantumStateF32::new(2).unwrap();
        assert!(state.apply_gate(&Gate::CNOT(0, 0)).is_err());
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,94 @@
//! Gate-fusion optimiser
//!
//! Scans a circuit for runs of consecutive single-qubit gates acting on the
//! same qubit and fuses them into a single `Unitary1Q` gate by multiplying
//! their 2x2 matrices.
use crate::circuit::QuantumCircuit;
use crate::gate::Gate;
use crate::types::Complex;
/// Multiply two 2x2 complex matrices: C = A * B.
pub fn mat_mul_2x2(a: &[[Complex; 2]; 2], b: &[[Complex; 2]; 2]) -> [[Complex; 2]; 2] {
[
[
a[0][0] * b[0][0] + a[0][1] * b[1][0],
a[0][0] * b[0][1] + a[0][1] * b[1][1],
],
[
a[1][0] * b[0][0] + a[1][1] * b[1][0],
a[1][0] * b[0][1] + a[1][1] * b[1][1],
],
]
}
/// Check whether two gates can be fused.
///
/// Both must be non-measurement single-qubit unitaries acting on the same qubit.
pub fn can_fuse(a: &Gate, b: &Gate) -> bool {
    // Measurements, resets, and barriers are never fusable.
    if a.is_non_unitary() || b.is_non_unitary() {
        return false;
    }
    // Both gates must expose a 2x2 matrix.
    if a.matrix_1q().is_none() || b.matrix_1q().is_none() {
        return false;
    }
    // ...and target exactly the same single qubit.
    let qa = a.qubits();
    let qb = b.qubits();
    qa.len() == 1 && qb.len() == 1 && qa[0] == qb[0]
}
/// Optimise a circuit by greedily fusing consecutive single-qubit gates
/// that act on the same qubit.
///
/// Returns a new, potentially shorter circuit.
pub fn fuse_gates(circuit: &QuantumCircuit) -> QuantumCircuit {
    let mut optimized = QuantumCircuit::new(circuit.num_qubits());
    let gates = circuit.gates();
    let mut idx = 0usize;
    while idx < gates.len() {
        let gate = &gates[idx];
        // A fusion run can only start on a unitary gate with a 2x2 matrix.
        let seed_matrix = if gate.is_non_unitary() {
            None
        } else {
            gate.matrix_1q()
        };
        match seed_matrix {
            // Not fusable: copy the gate through unchanged.
            None => {
                optimized.add_gate(gate.clone());
                idx += 1;
            }
            Some(mut acc) => {
                let target = gate.qubits()[0];
                let mut run = 1usize;
                // Greedily absorb subsequent fusable 1Q gates on `target`.
                loop {
                    match gates.get(idx + run) {
                        Some(next) if !next.is_non_unitary() => match next.matrix_1q() {
                            Some(m) if matches!(next.qubits().as_slice(), [q] if *q == target) => {
                                // The later gate multiplies from the left:
                                // C = M_next * C.
                                acc = mat_mul_2x2(&m, &acc);
                                run += 1;
                            }
                            _ => break,
                        },
                        _ => break,
                    }
                }
                if run > 1 {
                    optimized.add_gate(Gate::Unitary1Q(target, acc));
                } else {
                    // Nothing absorbed: keep the original gate as-is.
                    optimized.add_gate(gate.clone());
                }
                idx += run;
            }
        }
    }
    optimized
}

View File

@@ -0,0 +1,570 @@
//! End-to-end quantum execution pipeline.
//!
//! Orchestrates the full lifecycle of a quantum circuit execution:
//! plan -> decompose -> execute (per segment) -> stitch -> verify.
//!
//! # Example
//!
//! ```no_run
//! use ruqu_core::circuit::QuantumCircuit;
//! use ruqu_core::pipeline::{Pipeline, PipelineConfig};
//!
//! let mut circ = QuantumCircuit::new(4);
//! circ.h(0).cnot(0, 1).h(2).cnot(2, 3);
//!
//! let config = PipelineConfig::default();
//! let result = Pipeline::execute(&circ, &config).unwrap();
//! assert!(result.total_probability > 0.99);
//! ```
use std::collections::HashMap;
use crate::backend::BackendType;
use crate::circuit::QuantumCircuit;
use crate::decomposition::{decompose, stitch_results, CircuitPartition, DecompositionStrategy};
use crate::error::Result;
use crate::planner::{plan_execution, ExecutionPlan, PlannerConfig};
use crate::simulator::Simulator;
use crate::verification::{verify_circuit, VerificationResult};
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------
/// Configuration for the execution pipeline.
#[derive(Debug, Clone)]
pub struct PipelineConfig {
    /// Planner configuration (memory limits, noise, precision).
    pub planner: PlannerConfig,
    /// Maximum qubits per decomposed segment.
    pub max_segment_qubits: u32,
    /// Number of measurement shots per segment.
    pub shots: u32,
    /// Whether to run cross-backend verification.
    pub verify: bool,
    /// Deterministic seed for reproducibility.
    pub seed: u64,
}
impl Default for PipelineConfig {
    fn default() -> Self {
        Self {
            planner: PlannerConfig::default(),
            // 25 matches the exact-verification cutoff in `Pipeline::execute`
            // (circuits of <= 25 qubits are re-simulated for cross-checking).
            max_segment_qubits: 25,
            shots: 1024,
            verify: true,
            seed: 42,
        }
    }
}
// ---------------------------------------------------------------------------
// Pipeline result
// ---------------------------------------------------------------------------
/// Complete result from a pipeline execution.
#[derive(Debug, Clone)]
pub struct PipelineResult {
    /// The execution plan that was used.
    pub plan: ExecutionPlan,
    /// How the circuit was decomposed.
    pub decomposition: DecompositionSummary,
    /// Per-segment execution results.
    pub segment_results: Vec<SegmentResult>,
    /// Combined (stitched) measurement distribution.
    /// Keys are measurement bitstrings; bit ordering presumably follows the
    /// simulator's qubit order — TODO confirm against `stitch_results`.
    pub distribution: HashMap<Vec<bool>, f64>,
    /// Total probability mass (should be ~1.0; deviations indicate lost
    /// mass during stitching or sampling noise).
    pub total_probability: f64,
    /// Verification result, if verification was enabled.
    pub verification: Option<VerificationResult>,
    /// Fidelity estimate for the stitched result.
    pub estimated_fidelity: f64,
}
/// Summary of the decomposition step.
#[derive(Debug, Clone)]
pub struct DecompositionSummary {
    /// Number of segments the circuit was split into.
    pub num_segments: usize,
    /// Strategy that was used.
    pub strategy: DecompositionStrategy,
    /// Backends selected for each segment (parallel to the segment list).
    pub backends: Vec<BackendType>,
}
/// Result from executing a single segment.
#[derive(Debug, Clone)]
pub struct SegmentResult {
    /// Which segment (0-indexed).
    pub index: usize,
    /// Backend that was used.
    pub backend: BackendType,
    /// Number of qubits in this segment.
    pub num_qubits: u32,
    /// Measurement distribution from this segment (bitstring, probability).
    pub distribution: Vec<(Vec<bool>, f64)>,
}
// ---------------------------------------------------------------------------
// Pipeline implementation
// ---------------------------------------------------------------------------
/// The quantum execution pipeline.
///
/// Stateless facade: all functionality is exposed as associated functions.
pub struct Pipeline;
impl Pipeline {
    /// Execute a quantum circuit through the full pipeline.
    ///
    /// Steps:
    /// 1. Plan: select optimal backend(s) via cost-model routing.
    /// 2. Decompose: partition into independently-simulable segments.
    /// 3. Execute: run each segment on its assigned backend.
    /// 4. Stitch: combine segment results into a joint distribution.
    /// 5. Estimate fidelity of the stitched result.
    /// 6. Verify: optionally cross-check against a reference backend.
    pub fn execute(circuit: &QuantumCircuit, config: &PipelineConfig) -> Result<PipelineResult> {
        // Step 1: Plan
        let plan = plan_execution(circuit, &config.planner);
        // Step 2: Decompose
        let partition = decompose(circuit, config.max_segment_qubits);
        let decomposition = DecompositionSummary {
            num_segments: partition.segments.len(),
            strategy: partition.strategy,
            backends: partition.segments.iter().map(|s| s.backend).collect(),
        };
        // Step 3: Execute each segment
        let mut segment_results = Vec::new();
        let mut all_segment_distributions: Vec<Vec<(Vec<bool>, f64)>> = Vec::new();
        for (idx, segment) in partition.segments.iter().enumerate() {
            // Per-segment seed derived from the base seed so whole-pipeline
            // runs stay deterministic while segments differ.
            let shot_seed = config.seed.wrapping_add(idx as u64);
            // Use the multi-shot simulator for each segment.
            // The simulator always uses the state-vector backend internally,
            // which is correct for segments that fit within max_segment_qubits.
            let shot_result =
                Simulator::run_shots(&segment.circuit, config.shots, Some(shot_seed))?;
            // Convert the histogram counts to a probability distribution.
            let dist = counts_to_distribution(&shot_result.counts);
            segment_results.push(SegmentResult {
                index: idx,
                backend: resolve_backend(segment.backend),
                num_qubits: segment.circuit.num_qubits(),
                distribution: dist.clone(),
            });
            all_segment_distributions.push(dist);
        }
        // Step 4: Stitch results
        //
        // `stitch_results` expects a flat list of (bitstring, probability)
        // pairs, grouped by segment. Segments are distinguished by
        // consecutive runs of equal-length bitstrings (see decomposition.rs).
        // NOTE(review): if two adjacent segments share the same qubit count,
        // that length-based grouping looks ambiguous — confirm against the
        // `stitch_results` implementation.
        let flat_partitions: Vec<(Vec<bool>, f64)> =
            all_segment_distributions.into_iter().flatten().collect();
        let distribution = stitch_results(&flat_partitions);
        let total_probability: f64 = distribution.values().sum();
        // Step 5: Estimate fidelity
        let estimated_fidelity = estimate_pipeline_fidelity(&segment_results, &partition);
        // Step 6: Verify (optional) — only for circuits small enough to
        // re-simulate exactly (<= 25 qubits).
        let verification = if config.verify && circuit.num_qubits() <= 25 {
            Some(verify_circuit(circuit, config.shots, config.seed))
        } else {
            None
        };
        Ok(PipelineResult {
            plan,
            decomposition,
            segment_results,
            distribution,
            total_probability,
            verification,
            estimated_fidelity,
        })
    }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/// Map a requested backend onto one the basic simulator can actually run.
///
/// `Auto` and `CliffordT` both resolve to `StateVector`: the bundled
/// simulator only executes state-vector segments, so backends without a
/// dedicated engine fall back to it. A full production system would
/// dispatch these to their respective engines instead.
fn resolve_backend(backend: BackendType) -> BackendType {
    if matches!(backend, BackendType::Auto | BackendType::CliffordT) {
        BackendType::StateVector
    } else {
        backend
    }
}
/// Convert a shot-count histogram to a sorted probability distribution.
///
/// Each entry in the returned vector is `(bitstring, probability)`, sorted
/// in descending order of probability. Entries with equal probability are
/// ordered by ascending bitstring, so the output is fully deterministic
/// regardless of `HashMap` iteration order.
///
/// Returns an empty vector when the histogram contains no shots.
fn counts_to_distribution(counts: &HashMap<Vec<bool>, usize>) -> Vec<(Vec<bool>, f64)> {
    let total: usize = counts.values().sum();
    if total == 0 {
        return Vec::new();
    }
    let total_f = total as f64;
    let mut dist: Vec<(Vec<bool>, f64)> = counts
        .iter()
        .map(|(bits, &count)| (bits.clone(), count as f64 / total_f))
        .collect();
    // Sort by probability descending, breaking ties on the bitstring itself.
    // Without the tie-break, equal-probability entries would inherit the
    // HashMap's nondeterministic iteration order.
    dist.sort_by(|a, b| {
        b.1.partial_cmp(&a.1)
            .unwrap_or(std::cmp::Ordering::Equal)
            .then_with(|| a.0.cmp(&b.0))
    });
    dist
}
/// Estimate pipeline fidelity based on decomposition structure.
///
/// A single segment means nothing was cut, so the fidelity is exactly 1.0.
/// With multiple segments, every boundary between neighbouring segments is
/// charged a fixed per-cut fidelity whose value depends on the
/// decomposition strategy, giving `per_cut ^ num_cuts` overall.
fn estimate_pipeline_fidelity(segments: &[SegmentResult], partition: &CircuitPartition) -> f64 {
    let num_cuts = segments.len().saturating_sub(1);
    if num_cuts == 0 {
        return 1.0;
    }
    // No full Schmidt decomposition of the severed entanglement is
    // available, so a conservative constant loss is assumed per cut.
    // Spatial/hybrid cuts sever more entanglement than temporal ones.
    let per_cut: f64 = match partition.strategy {
        DecompositionStrategy::Spatial | DecompositionStrategy::Hybrid => 0.95,
        DecompositionStrategy::Temporal => 0.99,
        DecompositionStrategy::None => 1.0,
    };
    per_cut.powi(num_cuts as i32)
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::circuit::QuantumCircuit;
    // End-to-end checks: each test builds a small circuit, runs the full
    // pipeline, and inspects the stitched distribution and metadata.
    #[test]
    fn test_pipeline_bell_state() {
        let mut circ = QuantumCircuit::new(2);
        circ.h(0).cnot(0, 1);
        let config = PipelineConfig {
            shots: 1024,
            verify: true,
            seed: 42,
            ..PipelineConfig::default()
        };
        let result = Pipeline::execute(&circ, &config).unwrap();
        assert!(
            result.total_probability > 0.99,
            "total_probability should be ~1.0, got {}",
            result.total_probability
        );
        // An entangled pair cannot be split, so exactly one segment and
        // no fidelity loss are expected.
        assert_eq!(result.decomposition.num_segments, 1);
        assert_eq!(result.estimated_fidelity, 1.0);
    }
    #[test]
    fn test_pipeline_disjoint_bells() {
        // Two independent Bell pairs should decompose into 2 segments.
        let mut circ = QuantumCircuit::new(4);
        circ.h(0).cnot(0, 1);
        circ.h(2).cnot(2, 3);
        let config = PipelineConfig::default();
        let result = Pipeline::execute(&circ, &config).unwrap();
        assert!(
            result.decomposition.num_segments >= 2,
            "expected >= 2 segments for disjoint Bell pairs, got {}",
            result.decomposition.num_segments
        );
        assert!(
            result.total_probability > 0.95,
            "total_probability should be ~1.0, got {}",
            result.total_probability
        );
        assert!(
            result.estimated_fidelity > 0.90,
            "fidelity should be > 0.90, got {}",
            result.estimated_fidelity
        );
    }
    #[test]
    fn test_pipeline_single_qubit() {
        let mut circ = QuantumCircuit::new(1);
        circ.h(0);
        // verify=false must suppress the verification step entirely.
        let config = PipelineConfig {
            verify: false,
            ..PipelineConfig::default()
        };
        let result = Pipeline::execute(&circ, &config).unwrap();
        assert!(
            result.total_probability > 0.99,
            "total_probability should be ~1.0, got {}",
            result.total_probability
        );
        assert!(result.verification.is_none());
    }
    #[test]
    fn test_pipeline_ghz_state() {
        let mut circ = QuantumCircuit::new(5);
        circ.h(0);
        for i in 0..4u32 {
            circ.cnot(i, i + 1);
        }
        let config = PipelineConfig {
            shots: 2048,
            seed: 123,
            ..PipelineConfig::default()
        };
        let result = Pipeline::execute(&circ, &config).unwrap();
        assert!(
            result.total_probability > 0.99,
            "total_probability should be ~1.0, got {}",
            result.total_probability
        );
        // GHZ state should have ~50% |00000> and ~50% |11111>.
        let all_false = vec![false; 5];
        let all_true = vec![true; 5];
        let p_all_false = result.distribution.get(&all_false).copied().unwrap_or(0.0);
        let p_all_true = result.distribution.get(&all_true).copied().unwrap_or(0.0);
        assert!(
            p_all_false > 0.3,
            "GHZ should have significant |00000>, got {}",
            p_all_false
        );
        assert!(
            p_all_true > 0.3,
            "GHZ should have significant |11111>, got {}",
            p_all_true
        );
    }
    #[test]
    fn test_pipeline_config_default() {
        let config = PipelineConfig::default();
        assert_eq!(config.max_segment_qubits, 25);
        assert_eq!(config.shots, 1024);
        assert!(config.verify);
        assert_eq!(config.seed, 42);
    }
    #[test]
    fn test_pipeline_with_verification() {
        let mut circ = QuantumCircuit::new(3);
        circ.h(0).cnot(0, 1).cnot(1, 2);
        let config = PipelineConfig {
            verify: true,
            shots: 512,
            ..PipelineConfig::default()
        };
        let result = Pipeline::execute(&circ, &config).unwrap();
        assert!(
            result.verification.is_some(),
            "verification should be present when verify=true"
        );
    }
    // Unit tests for the private helpers below.
    #[test]
    fn test_resolve_backend() {
        assert_eq!(resolve_backend(BackendType::Auto), BackendType::StateVector);
        assert_eq!(
            resolve_backend(BackendType::StateVector),
            BackendType::StateVector
        );
        assert_eq!(
            resolve_backend(BackendType::Stabilizer),
            BackendType::Stabilizer
        );
        assert_eq!(
            resolve_backend(BackendType::TensorNetwork),
            BackendType::TensorNetwork
        );
        assert_eq!(
            resolve_backend(BackendType::CliffordT),
            BackendType::StateVector
        );
    }
    #[test]
    fn test_estimate_fidelity_single_segment() {
        let segments = vec![SegmentResult {
            index: 0,
            backend: BackendType::StateVector,
            num_qubits: 5,
            distribution: vec![(vec![false; 5], 1.0)],
        }];
        let partition = CircuitPartition {
            segments: vec![],
            total_qubits: 5,
            strategy: DecompositionStrategy::None,
        };
        assert_eq!(estimate_pipeline_fidelity(&segments, &partition), 1.0);
    }
    #[test]
    fn test_estimate_fidelity_two_spatial_segments() {
        let segments = vec![
            SegmentResult {
                index: 0,
                backend: BackendType::StateVector,
                num_qubits: 2,
                distribution: vec![(vec![false, false], 0.5), (vec![true, true], 0.5)],
            },
            SegmentResult {
                index: 1,
                backend: BackendType::StateVector,
                num_qubits: 2,
                distribution: vec![(vec![false, false], 0.5), (vec![true, true], 0.5)],
            },
        ];
        let partition = CircuitPartition {
            segments: vec![],
            total_qubits: 4,
            strategy: DecompositionStrategy::Spatial,
        };
        let fidelity = estimate_pipeline_fidelity(&segments, &partition);
        // 0.95^1 = 0.95
        assert!(
            (fidelity - 0.95).abs() < 1e-10,
            "expected fidelity 0.95, got {}",
            fidelity
        );
    }
    #[test]
    fn test_estimate_fidelity_temporal() {
        let segments = vec![
            SegmentResult {
                index: 0,
                backend: BackendType::StateVector,
                num_qubits: 2,
                distribution: vec![(vec![false, false], 1.0)],
            },
            SegmentResult {
                index: 1,
                backend: BackendType::StateVector,
                num_qubits: 2,
                distribution: vec![(vec![false, false], 1.0)],
            },
        ];
        let partition = CircuitPartition {
            segments: vec![],
            total_qubits: 2,
            strategy: DecompositionStrategy::Temporal,
        };
        let fidelity = estimate_pipeline_fidelity(&segments, &partition);
        // 0.99^1 = 0.99
        assert!(
            (fidelity - 0.99).abs() < 1e-10,
            "expected fidelity 0.99, got {}",
            fidelity
        );
    }
    #[test]
    fn test_counts_to_distribution_empty() {
        let counts: HashMap<Vec<bool>, usize> = HashMap::new();
        let dist = counts_to_distribution(&counts);
        assert!(dist.is_empty());
    }
    #[test]
    fn test_counts_to_distribution_uniform() {
        let mut counts: HashMap<Vec<bool>, usize> = HashMap::new();
        counts.insert(vec![false], 500);
        counts.insert(vec![true], 500);
        let dist = counts_to_distribution(&counts);
        assert_eq!(dist.len(), 2);
        let total_prob: f64 = dist.iter().map(|(_, p)| p).sum();
        assert!(
            (total_prob - 1.0).abs() < 1e-10,
            "distribution should sum to 1.0, got {}",
            total_prob
        );
    }
    #[test]
    fn test_pipeline_no_verification_large_qubit() {
        // A circuit with more than 25 qubits should skip verification
        // even when verify=true (the pipeline caps at 25 qubits).
        let mut circ = QuantumCircuit::new(26);
        circ.h(0);
        let config = PipelineConfig {
            verify: true,
            shots: 64,
            ..PipelineConfig::default()
        };
        let result = Pipeline::execute(&circ, &config).unwrap();
        assert!(
            result.verification.is_none(),
            "verification should be skipped for > 25 qubits"
        );
    }
    #[test]
    fn test_pipeline_preserves_plan() {
        let mut circ = QuantumCircuit::new(3);
        circ.h(0).cnot(0, 1).cnot(1, 2);
        let config = PipelineConfig::default();
        let result = Pipeline::execute(&circ, &config).unwrap();
        // The plan should reflect the planner's analysis.
        assert!(
            !result.plan.explanation.is_empty(),
            "plan should have a non-empty explanation"
        );
    }
    #[test]
    fn test_pipeline_segment_results_match_decomposition() {
        let mut circ = QuantumCircuit::new(4);
        circ.h(0).cnot(0, 1);
        circ.h(2).cnot(2, 3);
        let config = PipelineConfig::default();
        let result = Pipeline::execute(&circ, &config).unwrap();
        // One SegmentResult per decomposition segment.
        assert_eq!(
            result.segment_results.len(),
            result.decomposition.num_segments,
            "segment_results count should match decomposition num_segments"
        );
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,969 @@
//! OpenQASM 3.0 export bridge for `QuantumCircuit`.
//!
//! Converts a circuit into a valid OpenQASM 3.0 program string using the
//! `stdgates.inc` naming conventions. Arbitrary single-qubit unitaries
//! (`Unitary1Q`) are decomposed into ZYZ Euler angles and emitted as
//! `U(theta, phi, lambda)` gates.
use std::fmt::Write;
use crate::circuit::QuantumCircuit;
use crate::gate::Gate;
use crate::types::Complex;
// ---------------------------------------------------------------------------
// ZYZ Euler decomposition
// ---------------------------------------------------------------------------
/// Euler angles in the ZYZ convention: `Rz(phi) * Ry(theta) * Rz(lambda)`.
///
/// The overall unitary (up to a global phase) is:
///
/// ```text
/// U(theta, phi, lambda) = Rz(phi) * Ry(theta) * Rz(lambda)
/// ```
///
/// This matches the OpenQASM 3.0 `U(theta, phi, lambda)` gate definition.
struct ZyzAngles {
    // Rotation angle of the middle Ry factor.
    theta: f64,
    // Phase angle of the leading Rz factor.
    phi: f64,
    // Phase angle of the trailing Rz factor.
    lambda: f64,
}
/// Decompose an arbitrary 2x2 unitary matrix into ZYZ Euler angles.
///
/// Given a unitary U, we find (theta, phi, lambda) such that
///
/// ```text
/// U = e^{i*alpha} * Rz(phi) * Ry(theta) * Rz(lambda)
/// ```
///
/// where alpha is a discarded global phase.
///
/// The parametrisation expands to:
///
/// ```text
/// U[0][0] = e^{ia} * cos(t/2) * e^{-i(p+l)/2}
/// U[0][1] = e^{ia} * (-sin(t/2)) * e^{-i(p-l)/2}
/// U[1][0] = e^{ia} * sin(t/2) * e^{i(p-l)/2}
/// U[1][1] = e^{ia} * cos(t/2) * e^{i(p+l)/2}
/// ```
///
/// We extract phi and lambda independently using products that isolate
/// each angle, avoiding the half-sum/half-difference 2*pi ambiguity.
fn decompose_zyz(u: &[[Complex; 2]; 2]) -> ZyzAngles {
    // For a unitary, |U[0][0]| = cos(t/2) and |U[1][0]| = sin(t/2).
    let abs00 = u[0][0].norm();
    let abs10 = u[1][0].norm();
    // Clamp for numerical safety before acos
    let cos_half_theta = abs00.clamp(0.0, 1.0);
    let theta = 2.0 * cos_half_theta.acos();
    // Magnitudes below eps are treated as exactly zero, routing the
    // computation to the theta ~ 0 or theta ~ pi special cases.
    let eps = 1e-12;
    if abs00 > eps && abs10 > eps {
        // General case: both cos(t/2) and sin(t/2) are nonzero.
        //
        // We extract phi and lambda directly from pairwise products of
        // matrix elements that isolate each angle individually.
        //
        // From the parametrisation (global phase e^{ia} cancels in products
        // of an element with the conjugate of another):
        //
        //   conj(U[0][0]) * U[1][0] = cos(t/2) * sin(t/2) * e^{i*phi}
        //     => phi = arg(conj(U[0][0]) * U[1][0])
        //
        //   U[1][1] * conj(U[1][0]) = cos(t/2) * sin(t/2) * e^{i*lambda}
        //     => lambda = arg(U[1][1] * conj(U[1][0]))
        //
        // These formulas give phi and lambda each in (-pi, pi] without
        // the half-angle ambiguity that plagues the (sum, diff) approach.
        let phi_complex = u[0][0].conj() * u[1][0];
        let lambda_complex = u[1][1] * u[1][0].conj();
        ZyzAngles {
            theta,
            phi: phi_complex.arg(),
            lambda: lambda_complex.arg(),
        }
    } else if abs10 < eps {
        // theta ~ 0: U is nearly diagonal (up to global phase).
        //   U[0][0] = e^{ia} * e^{-i(p+l)/2}
        //   U[1][1] = e^{ia} * e^{i(p+l)/2}
        //   => U[1][1] * conj(U[0][0]) = e^{i(p+l)}
        // We only need phi + lambda. Set lambda = 0.
        let diag_product = u[1][1] * u[0][0].conj();
        ZyzAngles {
            theta: 0.0,
            phi: diag_product.arg(),
            lambda: 0.0,
        }
    } else {
        // theta ~ pi: U[0][0] ~ 0 and U[1][1] ~ 0.
        // Only the off-diagonal elements carry useful phase info.
        //   U[1][0] = e^{ia} * sin(t/2) * e^{i(p-l)/2}
        //   U[0][1] = e^{ia} * (-sin(t/2)) * e^{-i(p-l)/2}
        //
        //   U[1][0] * conj(-U[0][1]) = sin^2(t/2) * e^{i(p-l)}
        //
        // Set lambda = 0, phi = phi - lambda = arg of that product.
        let neg_01 = -u[0][1];
        let anti_product = u[1][0] * neg_01.conj();
        ZyzAngles {
            theta: std::f64::consts::PI,
            phi: anti_product.arg(),
            lambda: 0.0,
        }
    }
}
// ---------------------------------------------------------------------------
// Angle formatting helper
// ---------------------------------------------------------------------------
/// Format a floating-point angle for QASM output.
///
/// Uses 15 significant digits (full f64 precision). Magnitudes in
/// `[1e-4, 1e6)` are rendered in fixed notation with trailing zeros (and a
/// dangling decimal point) trimmed for readability; zero renders as `"0"`;
/// everything else falls back to scientific notation.
fn fmt_angle(angle: f64) -> String {
    let abs = angle.abs();
    if abs == 0.0 {
        return "0".to_string();
    }
    if (1e-4..1e6).contains(&abs) {
        // Fixed notation with full precision, then trim trailing zeros
        // after the decimal point (and the point itself if nothing is left).
        let fixed = format!("{:.15}", angle);
        let trimmed = fixed.trim_end_matches('0').trim_end_matches('.');
        trimmed.to_string()
    } else {
        // Scientific notation for very small or very large magnitudes.
        // Only formatted here, on the branch that actually needs it; the
        // previous version built this string eagerly on every call.
        format!("{:.15e}", angle)
    }
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/// Convert a `QuantumCircuit` into a valid OpenQASM 3.0 program string.
///
/// The emitted program uses `stdgates.inc` gate names and follows the
/// OpenQASM 3.0 specification for qubit/bit declarations, measurements,
/// resets, and barriers.
///
/// # Example
///
/// ```
/// use ruqu_core::circuit::QuantumCircuit;
/// use ruqu_core::qasm::to_qasm3;
///
/// let mut circuit = QuantumCircuit::new(2);
/// circuit.h(0).cnot(0, 1);
/// let qasm = to_qasm3(&circuit);
/// assert!(qasm.starts_with("OPENQASM 3.0;"));
/// ```
pub fn to_qasm3(circuit: &QuantumCircuit) -> String {
    let qubit_count = circuit.num_qubits();
    // Rough pre-sizing: fixed header plus ~30 bytes per gate line.
    let mut program = String::with_capacity(256 + circuit.gates().len() * 30);
    // Version pragma and standard-gate include.
    program.push_str("OPENQASM 3.0;\n");
    program.push_str("include \"stdgates.inc\";\n");
    // One quantum register and one matching classical register.
    let _ = writeln!(program, "qubit[{}] q;", qubit_count);
    let _ = writeln!(program, "bit[{}] c;", qubit_count);
    // Body: one or more statements per gate, in circuit order.
    circuit
        .gates()
        .iter()
        .for_each(|gate| emit_gate(&mut program, gate));
    program
}
/// Emit a single gate as one or more QASM lines.
fn emit_gate(out: &mut String, gate: &Gate) {
match gate {
// --- Single-qubit standard gates ---
Gate::H(q) => {
let _ = writeln!(out, "h q[{}];", q);
}
Gate::X(q) => {
let _ = writeln!(out, "x q[{}];", q);
}
Gate::Y(q) => {
let _ = writeln!(out, "y q[{}];", q);
}
Gate::Z(q) => {
let _ = writeln!(out, "z q[{}];", q);
}
Gate::S(q) => {
let _ = writeln!(out, "s q[{}];", q);
}
Gate::Sdg(q) => {
let _ = writeln!(out, "sdg q[{}];", q);
}
Gate::T(q) => {
let _ = writeln!(out, "t q[{}];", q);
}
Gate::Tdg(q) => {
let _ = writeln!(out, "tdg q[{}];", q);
}
// --- Parametric single-qubit gates ---
Gate::Rx(q, angle) => {
let _ = writeln!(out, "rx({}) q[{}];", fmt_angle(*angle), q);
}
Gate::Ry(q, angle) => {
let _ = writeln!(out, "ry({}) q[{}];", fmt_angle(*angle), q);
}
Gate::Rz(q, angle) => {
let _ = writeln!(out, "rz({}) q[{}];", fmt_angle(*angle), q);
}
Gate::Phase(q, angle) => {
let _ = writeln!(out, "p({}) q[{}];", fmt_angle(*angle), q);
}
// --- Two-qubit gates ---
Gate::CNOT(ctrl, tgt) => {
let _ = writeln!(out, "cx q[{}], q[{}];", ctrl, tgt);
}
Gate::CZ(q1, q2) => {
let _ = writeln!(out, "cz q[{}], q[{}];", q1, q2);
}
Gate::SWAP(q1, q2) => {
let _ = writeln!(out, "swap q[{}], q[{}];", q1, q2);
}
Gate::Rzz(q1, q2, angle) => {
let _ = writeln!(out, "rzz({}) q[{}], q[{}];", fmt_angle(*angle), q1, q2);
}
// --- Special operations ---
Gate::Measure(q) => {
let _ = writeln!(out, "c[{}] = measure q[{}];", q, q);
}
Gate::Reset(q) => {
let _ = writeln!(out, "reset q[{}];", q);
}
Gate::Barrier => {
out.push_str("barrier q;\n");
}
// --- Arbitrary single-qubit unitary (ZYZ decomposition) ---
Gate::Unitary1Q(q, matrix) => {
let angles = decompose_zyz(matrix);
let _ = writeln!(
out,
"U({}, {}, {}) q[{}];",
fmt_angle(angles.theta),
fmt_angle(angles.phi),
fmt_angle(angles.lambda),
q,
);
}
}
}
// ===========================================================================
// Tests
// ===========================================================================
#[cfg(test)]
mod tests {
use super::*;
use crate::circuit::QuantumCircuit;
use crate::gate::Gate;
use crate::types::Complex;
use std::f64::consts::{FRAC_1_SQRT_2, FRAC_PI_2, FRAC_PI_4, PI};
/// Helper: verify the QASM header is present and well-formed.
fn assert_valid_header(qasm: &str) {
let lines: Vec<&str> = qasm.lines().collect();
assert!(lines.len() >= 4, "QASM output should have at least 4 lines");
assert_eq!(lines[0], "OPENQASM 3.0;");
assert_eq!(lines[1], "include \"stdgates.inc\";");
assert!(lines[2].starts_with("qubit["));
assert!(lines[3].starts_with("bit["));
}
/// Collect only the gate lines (skip the 4-line header).
fn gate_lines(qasm: &str) -> Vec<String> {
qasm.lines()
.skip(4)
.map(|l| l.to_string())
.filter(|l| !l.is_empty())
.collect()
}
// ----- Bell State -----
#[test]
fn test_bell_state() {
let mut circuit = QuantumCircuit::new(2);
circuit.h(0).cnot(0, 1);
let qasm = to_qasm3(&circuit);
assert_valid_header(&qasm);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 2);
assert_eq!(lines[0], "h q[0];");
assert_eq!(lines[1], "cx q[0], q[1];");
// Verify register sizes
assert!(qasm.contains("qubit[2] q;"));
assert!(qasm.contains("bit[2] c;"));
}
#[test]
fn test_bell_state_with_measurement() {
let mut circuit = QuantumCircuit::new(2);
circuit.h(0).cnot(0, 1).measure(0).measure(1);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 4);
assert_eq!(lines[0], "h q[0];");
assert_eq!(lines[1], "cx q[0], q[1];");
assert_eq!(lines[2], "c[0] = measure q[0];");
assert_eq!(lines[3], "c[1] = measure q[1];");
}
// ----- GHZ State -----
#[test]
fn test_ghz_3_qubit() {
let mut circuit = QuantumCircuit::new(3);
circuit.h(0).cnot(0, 1).cnot(0, 2);
let qasm = to_qasm3(&circuit);
assert_valid_header(&qasm);
assert!(qasm.contains("qubit[3] q;"));
assert!(qasm.contains("bit[3] c;"));
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 3);
assert_eq!(lines[0], "h q[0];");
assert_eq!(lines[1], "cx q[0], q[1];");
assert_eq!(lines[2], "cx q[0], q[2];");
}
#[test]
fn test_ghz_5_qubit() {
let mut circuit = QuantumCircuit::new(5);
circuit.h(0);
for i in 1..5 {
circuit.cnot(0, i);
}
let qasm = to_qasm3(&circuit);
assert!(qasm.contains("qubit[5] q;"));
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 5);
assert_eq!(lines[0], "h q[0];");
for i in 1..5u32 {
assert_eq!(lines[i as usize], format!("cx q[0], q[{}];", i));
}
}
// ----- Parametric Gates -----
#[test]
fn test_parametric_rx_ry_rz() {
let mut circuit = QuantumCircuit::new(1);
circuit.rx(0, PI).ry(0, FRAC_PI_2).rz(0, FRAC_PI_4);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 3);
// Verify the gate names are correct
assert!(lines[0].starts_with("rx("));
assert!(lines[0].ends_with(") q[0];"));
assert!(lines[1].starts_with("ry("));
assert!(lines[2].starts_with("rz("));
// Verify angles parse back to original values within tolerance
let rx_angle: f64 = extract_angle(&lines[0]);
let ry_angle: f64 = extract_angle(&lines[1]);
let rz_angle: f64 = extract_angle(&lines[2]);
assert!((rx_angle - PI).abs() < 1e-10, "rx angle mismatch");
assert!((ry_angle - FRAC_PI_2).abs() < 1e-10, "ry angle mismatch");
assert!((rz_angle - FRAC_PI_4).abs() < 1e-10, "rz angle mismatch");
}
#[test]
fn test_phase_gate() {
let mut circuit = QuantumCircuit::new(1);
circuit.phase(0, PI / 3.0);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 1);
assert!(lines[0].starts_with("p("));
assert!(lines[0].ends_with(") q[0];"));
let angle = extract_angle(&lines[0]);
assert!((angle - PI / 3.0).abs() < 1e-10);
}
#[test]
fn test_rzz_gate() {
let mut circuit = QuantumCircuit::new(2);
circuit.rzz(0, 1, PI / 6.0);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 1);
assert!(lines[0].starts_with("rzz("));
assert!(lines[0].contains("q[0], q[1]"));
let angle = extract_angle(&lines[0]);
assert!((angle - PI / 6.0).abs() < 1e-10);
}
// ----- All Standard Gates -----
#[test]
fn test_all_single_qubit_standard_gates() {
let mut circuit = QuantumCircuit::new(1);
circuit.h(0);
circuit.x(0);
circuit.y(0);
circuit.z(0);
circuit.s(0);
circuit.add_gate(Gate::Sdg(0));
circuit.t(0);
circuit.add_gate(Gate::Tdg(0));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 8);
assert_eq!(lines[0], "h q[0];");
assert_eq!(lines[1], "x q[0];");
assert_eq!(lines[2], "y q[0];");
assert_eq!(lines[3], "z q[0];");
assert_eq!(lines[4], "s q[0];");
assert_eq!(lines[5], "sdg q[0];");
assert_eq!(lines[6], "t q[0];");
assert_eq!(lines[7], "tdg q[0];");
}
#[test]
fn test_two_qubit_gates() {
let mut circuit = QuantumCircuit::new(3);
circuit.cnot(0, 1);
circuit.cz(1, 2);
circuit.swap(0, 2);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 3);
assert_eq!(lines[0], "cx q[0], q[1];");
assert_eq!(lines[1], "cz q[1], q[2];");
assert_eq!(lines[2], "swap q[0], q[2];");
}
// ----- Special Operations -----
#[test]
fn test_reset() {
let mut circuit = QuantumCircuit::new(1);
circuit.reset(0);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 1);
assert_eq!(lines[0], "reset q[0];");
}
#[test]
fn test_barrier() {
let mut circuit = QuantumCircuit::new(3);
circuit.h(0).barrier().cnot(0, 1);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 3);
assert_eq!(lines[0], "h q[0];");
assert_eq!(lines[1], "barrier q;");
assert_eq!(lines[2], "cx q[0], q[1];");
}
#[test]
fn test_measure_all() {
let mut circuit = QuantumCircuit::new(3);
circuit.h(0).measure_all();
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 4);
assert_eq!(lines[0], "h q[0];");
assert_eq!(lines[1], "c[0] = measure q[0];");
assert_eq!(lines[2], "c[1] = measure q[1];");
assert_eq!(lines[3], "c[2] = measure q[2];");
}
// ----- Unitary1Q Decomposition -----
#[test]
fn test_unitary1q_identity() {
// Identity matrix should decompose to U(0, 0, 0) (or near-zero angles)
let identity = [
[Complex::new(1.0, 0.0), Complex::new(0.0, 0.0)],
[Complex::new(0.0, 0.0), Complex::new(1.0, 0.0)],
];
let mut circuit = QuantumCircuit::new(1);
circuit.add_gate(Gate::Unitary1Q(0, identity));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 1);
assert!(lines[0].starts_with("U("));
assert!(lines[0].ends_with(") q[0];"));
// Extract the three angles from U(theta, phi, lambda)
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
assert!(
theta.abs() < 1e-10,
"Identity theta should be ~0, got {}",
theta
);
// For identity, phi + lambda should be ~0 (mod 2*pi)
let sum = phi + lambda;
let sum_mod = ((sum % (2.0 * PI)) + 2.0 * PI) % (2.0 * PI);
assert!(
sum_mod.abs() < 1e-10 || (sum_mod - 2.0 * PI).abs() < 1e-10,
"Identity phi+lambda should be ~0 mod 2pi, got {}",
sum
);
}
#[test]
fn test_unitary1q_hadamard() {
// Hadamard matrix: (1/sqrt2) * [[1, 1], [1, -1]]
let h = FRAC_1_SQRT_2;
let hadamard = [
[Complex::new(h, 0.0), Complex::new(h, 0.0)],
[Complex::new(h, 0.0), Complex::new(-h, 0.0)],
];
let mut circuit = QuantumCircuit::new(1);
circuit.add_gate(Gate::Unitary1Q(0, hadamard));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 1);
assert!(lines[0].starts_with("U("));
// Hadamard is Rz(pi) * Ry(pi/2) * Rz(0) or equivalent.
// We verify the decomposition reconstructs the correct unitary.
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
let reconstructed = reconstruct_zyz(theta, phi, lambda);
assert_unitaries_equal_up_to_phase(&hadamard, &reconstructed);
}
#[test]
fn test_unitary1q_x_gate() {
// X gate: [[0, 1], [1, 0]]
let x_matrix = [
[Complex::new(0.0, 0.0), Complex::new(1.0, 0.0)],
[Complex::new(1.0, 0.0), Complex::new(0.0, 0.0)],
];
let mut circuit = QuantumCircuit::new(1);
circuit.add_gate(Gate::Unitary1Q(0, x_matrix));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
let reconstructed = reconstruct_zyz(theta, phi, lambda);
assert_unitaries_equal_up_to_phase(&x_matrix, &reconstructed);
}
#[test]
fn test_unitary1q_s_gate() {
// S gate: [[1, 0], [0, i]]
let s_matrix = [
[Complex::new(1.0, 0.0), Complex::new(0.0, 0.0)],
[Complex::new(0.0, 0.0), Complex::new(0.0, 1.0)],
];
let mut circuit = QuantumCircuit::new(1);
circuit.add_gate(Gate::Unitary1Q(0, s_matrix));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
// S is diagonal, so theta should be ~0
assert!(
theta.abs() < 1e-10,
"S gate theta should be ~0, got {}",
theta
);
let reconstructed = reconstruct_zyz(theta, phi, lambda);
assert_unitaries_equal_up_to_phase(&s_matrix, &reconstructed);
}
#[test]
fn test_unitary1q_arbitrary() {
// An arbitrary unitary: Rx(pi/3) in matrix form
let half = PI / 6.0;
let cos_h = half.cos();
let sin_h = half.sin();
let arb_matrix = [
[Complex::new(cos_h, 0.0), Complex::new(0.0, -sin_h)],
[Complex::new(0.0, -sin_h), Complex::new(cos_h, 0.0)],
];
let mut circuit = QuantumCircuit::new(1);
circuit.add_gate(Gate::Unitary1Q(0, arb_matrix));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
let reconstructed = reconstruct_zyz(theta, phi, lambda);
assert_unitaries_equal_up_to_phase(&arb_matrix, &reconstructed);
}
#[test]
fn test_unitary1q_y_gate() {
// Y gate: [[0, -i], [i, 0]]
let y_matrix = [
[Complex::new(0.0, 0.0), Complex::new(0.0, -1.0)],
[Complex::new(0.0, 1.0), Complex::new(0.0, 0.0)],
];
let mut circuit = QuantumCircuit::new(1);
circuit.add_gate(Gate::Unitary1Q(0, y_matrix));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
let reconstructed = reconstruct_zyz(theta, phi, lambda);
assert_unitaries_equal_up_to_phase(&y_matrix, &reconstructed);
}
// ----- Round-trip QASM text validation -----
#[test]
fn test_round_trip_text_validity() {
// Build a complex circuit with many gate types
let mut circuit = QuantumCircuit::new(4);
circuit
.h(0)
.x(1)
.y(2)
.z(3)
.s(0)
.t(1)
.rx(2, 1.234)
.ry(3, 2.345)
.rz(0, 0.567)
.phase(1, PI / 5.0)
.cnot(0, 1)
.cz(2, 3)
.swap(0, 3)
.rzz(1, 2, PI / 7.0)
.barrier()
.reset(0)
.measure(0)
.measure(1)
.measure(2)
.measure(3);
let qasm = to_qasm3(&circuit);
// Structural checks
assert_valid_header(&qasm);
assert!(qasm.contains("qubit[4] q;"));
assert!(qasm.contains("bit[4] c;"));
// Every line after the header should be a valid QASM statement
for line in qasm.lines().skip(4) {
if line.is_empty() {
continue;
}
assert!(
line.ends_with(';'),
"Line should end with semicolon: '{}'",
line
);
// Check it uses valid gate/operation keywords
let valid_starts = [
"h ", "x ", "y ", "z ", "s ", "sdg ", "t ", "tdg ", "rx(", "ry(", "rz(", "p(",
"rzz(", "cx ", "cz ", "swap ", "c[", "reset ", "barrier ", "U(",
];
assert!(
valid_starts.iter().any(|prefix| line.starts_with(prefix)),
"Line has unexpected format: '{}'",
line
);
}
}
#[test]
fn test_round_trip_gate_count() {
let mut circuit = QuantumCircuit::new(2);
circuit.h(0).cnot(0, 1).measure(0).measure(1);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
// Number of QASM gate lines should match circuit gate count
assert_eq!(
lines.len(),
circuit.gate_count(),
"Gate line count should match circuit gate count"
);
}
#[test]
fn test_empty_circuit() {
let circuit = QuantumCircuit::new(1);
let qasm = to_qasm3(&circuit);
assert_valid_header(&qasm);
assert!(qasm.contains("qubit[1] q;"));
assert!(qasm.contains("bit[1] c;"));
let lines = gate_lines(&qasm);
assert!(lines.is_empty());
}
#[test]
fn test_qubit_indices_in_bounds() {
// Verify that qubit indices in the output never exceed the register size
let mut circuit = QuantumCircuit::new(4);
circuit.h(0).cnot(0, 3).swap(1, 2).measure(3);
let qasm = to_qasm3(&circuit);
// Extract all qubit references q[N] and verify N < 4
for line in qasm.lines().skip(4) {
let mut remaining = line;
while let Some(start) = remaining.find("q[") {
let after_q = &remaining[start + 2..];
if let Some(end) = after_q.find(']') {
let idx_str = &after_q[..end];
let idx: u32 = idx_str
.parse()
.unwrap_or_else(|_| panic!("Invalid qubit index in: '{}'", line));
assert!(idx < 4, "Qubit index {} out of bounds in: '{}'", idx, line);
remaining = &after_q[end + 1..];
} else {
break;
}
}
}
}
#[test]
fn test_negative_angle() {
let mut circuit = QuantumCircuit::new(1);
circuit.rx(0, -PI / 4.0);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 1);
let angle = extract_angle(&lines[0]);
assert!((angle - (-PI / 4.0)).abs() < 1e-10);
}
#[test]
fn test_zero_angle() {
let mut circuit = QuantumCircuit::new(1);
circuit.rx(0, 0.0);
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 1);
assert!(lines[0].starts_with("rx("));
}
#[test]
fn test_sdg_and_tdg_gates() {
let mut circuit = QuantumCircuit::new(1);
circuit.add_gate(Gate::Sdg(0));
circuit.add_gate(Gate::Tdg(0));
let qasm = to_qasm3(&circuit);
let lines = gate_lines(&qasm);
assert_eq!(lines.len(), 2);
assert_eq!(lines[0], "sdg q[0];");
assert_eq!(lines[1], "tdg q[0];");
}
#[test]
fn test_large_circuit_structure() {
    // A more realistic circuit: QFT-like pattern on 4 qubits.
    let mut qc = QuantumCircuit::new(4);
    for target in 0..4u32 {
        qc.h(target);
        for ctrl in (target + 1)..4 {
            let theta = PI / (1u32 << (ctrl - target)) as f64;
            qc.phase(ctrl, theta);
            qc.cnot(ctrl, target);
        }
    }
    qc.measure_all();
    let qasm = to_qasm3(&qc);
    assert_valid_header(&qasm);
    assert!(qasm.contains("qubit[4] q;"));
    // Exactly one Hadamard and one measurement per qubit.
    let emitted = gate_lines(&qasm);
    let hadamards = emitted.iter().filter(|l| l.starts_with("h ")).count();
    let measures = emitted.iter().filter(|l| l.contains("measure")).count();
    assert_eq!(hadamards, 4);
    assert_eq!(measures, 4);
}
// ----- Test helpers -----
/// Extract the first angle from a gate line such as `rx(1.234) q[0];`.
fn extract_angle(line: &str) -> f64 {
    // Slice out the text between the first '(' and the first ')'.
    let open = line.find('(').expect("No opening parenthesis");
    let close = line.find(')').expect("No closing parenthesis");
    // Multi-parameter gates list angles comma-separated; take the first.
    let first = line[open + 1..close].split(',').next().unwrap().trim();
    match first.parse::<f64>() {
        Ok(v) => v,
        Err(e) => panic!("Failed to parse angle '{}': {}", first, e),
    }
}
/// Extract `(theta, phi, lambda)` from a U gate line like `U(t, p, l) q[0];`.
fn extract_u_angles(line: &str) -> (f64, f64, f64) {
    let open = line.find('(').expect("No opening parenthesis");
    let close = line.find(')').expect("No closing parenthesis");
    // Split the parenthesised argument list on commas, trimming whitespace.
    let parts: Vec<&str> = line[open + 1..close].split(',').map(str::trim).collect();
    assert_eq!(
        parts.len(),
        3,
        "U gate should have 3 angles, got: {:?}",
        parts
    );
    let parse = |s: &str| s.parse::<f64>().unwrap();
    (parse(parts[0]), parse(parts[1]), parse(parts[2]))
}
/// Reconstruct the 2x2 unitary from ZYZ Euler angles:
/// U = Rz(phi) * Ry(theta) * Rz(lambda)
fn reconstruct_zyz(theta: f64, phi: f64, lambda: f64) -> [[Complex; 2]; 2] {
    // Rz(a) = diag(e^{-ia/2}, e^{ia/2})
    let rz = |a: f64| -> [[Complex; 2]; 2] {
        [
            [Complex::from_polar(1.0, -a / 2.0), Complex::ZERO],
            [Complex::ZERO, Complex::from_polar(1.0, a / 2.0)],
        ]
    };
    // Ry(theta) = [[cos, -sin], [sin, cos]] of theta/2 -- purely real.
    let (st, ct) = ((theta / 2.0).sin(), (theta / 2.0).cos());
    let ry: [[Complex; 2]; 2] = [
        [Complex::new(ct, 0.0), Complex::new(-st, 0.0)],
        [Complex::new(st, 0.0), Complex::new(ct, 0.0)],
    ];
    // Compose left-to-right: Rz(phi) * Ry(theta) * Rz(lambda).
    mat_mul(&mat_mul(&rz(phi), &ry), &rz(lambda))
}
/// Multiply two 2x2 complex matrices.
fn mat_mul(a: &[[Complex; 2]; 2], b: &[[Complex; 2]; 2]) -> [[Complex; 2]; 2] {
[
[
a[0][0] * b[0][0] + a[0][1] * b[1][0],
a[0][0] * b[0][1] + a[0][1] * b[1][1],
],
[
a[1][0] * b[0][0] + a[1][1] * b[1][0],
a[1][0] * b[0][1] + a[1][1] * b[1][1],
],
]
}
/// Assert that two 2x2 unitaries are equal up to a global phase factor.
///
/// Two unitaries U and V are equal up to global phase if there exists
/// some phase factor e^{i*alpha} such that U = e^{i*alpha} * V.
///
/// We find the phase by looking at the first non-zero element.
fn assert_unitaries_equal_up_to_phase(
expected: &[[Complex; 2]; 2],
actual: &[[Complex; 2]; 2],
) {
let eps = 1e-8;
// Find the first element with significant magnitude in `expected`
let mut phase = Complex::ZERO;
let mut found = false;
for i in 0..2 {
for j in 0..2 {
if expected[i][j].norm() > eps {
// phase = actual[i][j] / expected[i][j]
// = actual * conj(expected) / |expected|^2
let denom = expected[i][j].norm_sq();
phase = actual[i][j] * expected[i][j].conj() * (1.0 / denom);
found = true;
break;
}
}
if found {
break;
}
}
assert!(found, "Expected matrix is all zeros");
// Verify the phase has unit magnitude
assert!(
(phase.norm() - 1.0).abs() < eps,
"Phase factor should have unit magnitude, got {}",
phase.norm()
);
// Verify all elements match up to the global phase
for i in 0..2 {
for j in 0..2 {
let scaled = expected[i][j] * phase;
let diff = (actual[i][j] - scaled).norm();
assert!(
diff < eps,
"Mismatch at [{},{}]: expected {} (scaled), got {}. diff={}",
i,
j,
scaled,
actual[i][j],
diff,
);
}
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,565 @@
//! Deterministic replay engine for quantum simulation reproducibility.
//!
//! Captures all parameters that affect simulation output (circuit structure,
//! seed, noise model, shots) into an [`ExecutionRecord`] so that any run can
//! be replayed bit-for-bit. Also provides [`StateCheckpoint`] for snapshotting
//! the raw amplitude vector mid-simulation.
use crate::circuit::QuantumCircuit;
use crate::gate::Gate;
use crate::simulator::{SimConfig, Simulator};
use crate::types::{Complex, NoiseModel};
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::time::{SystemTime, UNIX_EPOCH};
// ---------------------------------------------------------------------------
// NoiseConfig (serialisable snapshot of a NoiseModel)
// ---------------------------------------------------------------------------
/// Snapshot of a noise model configuration suitable for storage and replay.
///
/// Plain-data mirror of the simulator's `NoiseModel`: only the three channel
/// rates are captured, so a record can be persisted and later converted back
/// via [`NoiseConfig::to_noise_model`].
#[derive(Debug, Clone, PartialEq)]
pub struct NoiseConfig {
    // Copied verbatim from `NoiseModel::depolarizing_rate`.
    pub depolarizing_rate: f64,
    // Copied verbatim from `NoiseModel::bit_flip_rate`.
    pub bit_flip_rate: f64,
    // Copied verbatim from `NoiseModel::phase_flip_rate`.
    pub phase_flip_rate: f64,
}
impl NoiseConfig {
    /// Create a `NoiseConfig` from the simulator's [`NoiseModel`].
    pub fn from_noise_model(m: &NoiseModel) -> Self {
        // Destructure to get a compile error if the field set ever drifts.
        let NoiseModel {
            depolarizing_rate,
            bit_flip_rate,
            phase_flip_rate,
        } = m;
        Self {
            depolarizing_rate: *depolarizing_rate,
            bit_flip_rate: *bit_flip_rate,
            phase_flip_rate: *phase_flip_rate,
        }
    }
    /// Convert back to a [`NoiseModel`] for replay.
    pub fn to_noise_model(&self) -> NoiseModel {
        let Self {
            depolarizing_rate,
            bit_flip_rate,
            phase_flip_rate,
        } = self;
        NoiseModel {
            depolarizing_rate: *depolarizing_rate,
            bit_flip_rate: *bit_flip_rate,
            phase_flip_rate: *phase_flip_rate,
        }
    }
}
// ---------------------------------------------------------------------------
// ExecutionRecord
// ---------------------------------------------------------------------------
/// Complete record of every parameter that can influence simulation output.
///
/// Two runs with the same `ExecutionRecord` and the same circuit must produce
/// identical measurement outcomes (assuming deterministic seeding).
#[derive(Debug, Clone)]
pub struct ExecutionRecord {
    /// Deterministic hash of the circuit structure (gate types, parameters,
    /// qubit indices). Computed via [`ReplayEngine::circuit_hash`].
    /// NOTE(review): built from `DefaultHasher` (SipHash with unspecified
    /// keys), so hashes are stable within one build but not guaranteed
    /// stable across Rust toolchain versions -- confirm before persisting
    /// records long-term.
    pub circuit_hash: [u8; 32],
    /// RNG seed used for measurement sampling and noise channels.
    /// Recorded as 0 when the original config carried no explicit seed.
    pub seed: u64,
    /// Backend identifier string (e.g. `"state_vector"`).
    pub backend: String,
    /// Noise model parameters, if noise was enabled.
    pub noise_config: Option<NoiseConfig>,
    /// Number of measurement shots.
    pub shots: u32,
    /// Software version that produced this record.
    pub software_version: String,
    /// UTC timestamp (seconds since UNIX epoch) when the record was created.
    /// Falls back to 0 if the system clock reports a pre-epoch time.
    pub timestamp_utc: u64,
}
// ---------------------------------------------------------------------------
// ReplayEngine
// ---------------------------------------------------------------------------
/// Engine that records execution parameters and replays simulations for
/// reproducibility verification.
pub struct ReplayEngine {
    /// Software version embedded in every record.
    /// Initialised from `CARGO_PKG_VERSION` by [`ReplayEngine::new`].
    version: String,
}
impl ReplayEngine {
    /// Create a new `ReplayEngine` using the crate version from `Cargo.toml`.
    pub fn new() -> Self {
        let version = env!("CARGO_PKG_VERSION").to_string();
        Self { version }
    }

    /// Capture all parameters needed to deterministically replay a simulation.
    ///
    /// The returned [`ExecutionRecord`] is self-contained: given the same
    /// circuit, the record holds enough information to reproduce the exact
    /// measurement outcomes.
    pub fn record_execution(
        &self,
        circuit: &QuantumCircuit,
        config: &SimConfig,
        shots: u32,
    ) -> ExecutionRecord {
        // Seconds since the UNIX epoch; 0 if the system clock is pre-1970.
        let timestamp_utc = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map_or(0, |d| d.as_secs());
        ExecutionRecord {
            circuit_hash: Self::circuit_hash(circuit),
            // A missing seed is recorded as 0 so the record stays replayable.
            seed: config.seed.unwrap_or(0),
            backend: String::from("state_vector"),
            noise_config: config.noise.as_ref().map(NoiseConfig::from_noise_model),
            shots,
            software_version: self.version.clone(),
            timestamp_utc,
        }
    }

    /// Replay a simulation using the parameters in `record` and verify that
    /// the measurement outcomes match a fresh run.
    ///
    /// Returns `true` when the replayed results are identical to a reference
    /// run seeded with the same parameters. Both runs use the exact same seed
    /// so the RNG sequences must agree.
    pub fn replay(&self, record: &ExecutionRecord, circuit: &QuantumCircuit) -> bool {
        // A modified circuit cannot be replayed against this record.
        if Self::circuit_hash(circuit) != record.circuit_hash {
            return false;
        }
        // Build two identical configs from the record and run twice.
        let make_config = || SimConfig {
            seed: Some(record.seed),
            noise: record.noise_config.as_ref().map(NoiseConfig::to_noise_model),
            shots: None,
        };
        let run_a = Simulator::run_with_config(circuit, &make_config());
        let run_b = Simulator::run_with_config(circuit, &make_config());
        match (run_a, run_b) {
            (Ok(a), Ok(b)) => {
                // All measurements must agree in qubit, outcome, and (within
                // floating-point noise) probability.
                a.measurements.len() == b.measurements.len()
                    && a.measurements
                        .iter()
                        .zip(b.measurements.iter())
                        .all(|(ma, mb)| {
                            ma.qubit == mb.qubit
                                && ma.result == mb.result
                                && (ma.probability - mb.probability).abs() < 1e-12
                        })
            }
            _ => false,
        }
    }

    /// Compute a deterministic 32-byte hash of a circuit's structure.
    ///
    /// The hash captures, for every gate: its type discriminant, the qubit
    /// indices it acts on, and any continuous parameters (rotation angles).
    /// Two circuits with the same gate sequence produce the same hash.
    ///
    /// Uses `DefaultHasher` (SipHash-based) run four times with different
    /// seeds to fill the 32 bytes.
    pub fn circuit_hash(circuit: &QuantumCircuit) -> [u8; 32] {
        // Build a canonical byte representation of the circuit.
        let canonical = Self::circuit_canonical_bytes(circuit);
        let mut digest = [0u8; 32];
        // One 8-byte little-endian lane per seed value 0..=3.
        for (lane, chunk) in digest.chunks_exact_mut(8).enumerate() {
            let h = hash_bytes_with_seed(&canonical, lane as u64);
            chunk.copy_from_slice(&h.to_le_bytes());
        }
        digest
    }

    /// Serialise the circuit into a canonical byte sequence.
    ///
    /// The encoding is: `[num_qubits:4 bytes LE]` followed by, for each gate,
    /// `[discriminant:1 byte][qubit indices][f64 parameters as LE bytes]`.
    fn circuit_canonical_bytes(circuit: &QuantumCircuit) -> Vec<u8> {
        let mut encoded = Vec::new();
        // Circuit metadata first.
        encoded.extend_from_slice(&circuit.num_qubits().to_le_bytes());
        for gate in circuit.gates() {
            let (disc, qubits, params) = gate_components(gate);
            encoded.push(disc);
            // Qubit indices, then rotation parameters, all little-endian.
            for q in &qubits {
                encoded.extend_from_slice(&q.to_le_bytes());
            }
            for p in &params {
                encoded.extend_from_slice(&p.to_le_bytes());
            }
        }
        encoded
    }
}
impl Default for ReplayEngine {
fn default() -> Self {
Self::new()
}
}
// ---------------------------------------------------------------------------
// StateCheckpoint
// ---------------------------------------------------------------------------
/// Snapshot of a quantum state-vector that can be serialised and restored.
///
/// The internal representation stores amplitudes as interleaved `(re, im)` f64
/// pairs in little-endian byte order so that the checkpoint is
/// platform-independent.
#[derive(Debug, Clone)]
pub struct StateCheckpoint {
    // Raw bytes: 16 per amplitude (re as LE f64, then im as LE f64).
    data: Vec<u8>,
    // Number of complex amplitudes encoded in `data` (`data.len() / 16`).
    num_amplitudes: usize,
}
impl StateCheckpoint {
    /// Capture the current state-vector amplitudes into a checkpoint.
    pub fn capture(amplitudes: &[Complex]) -> Self {
        // 16 bytes per amplitude: real part then imaginary part, both LE f64.
        let mut bytes = Vec::with_capacity(amplitudes.len() * 16);
        for amp in amplitudes {
            bytes.extend_from_slice(&amp.re.to_le_bytes());
            bytes.extend_from_slice(&amp.im.to_le_bytes());
        }
        Self {
            data: bytes,
            num_amplitudes: amplitudes.len(),
        }
    }
    /// Restore the amplitudes from this checkpoint.
    pub fn restore(&self) -> Vec<Complex> {
        // `data` always holds exactly 16 bytes per amplitude (see `capture`),
        // so each chunk decodes to one complex value.
        self.data
            .chunks_exact(16)
            .take(self.num_amplitudes)
            .map(|chunk| {
                let re = f64::from_le_bytes(
                    chunk[0..8].try_into().expect("checkpoint data corrupted"),
                );
                let im = f64::from_le_bytes(
                    chunk[8..16].try_into().expect("checkpoint data corrupted"),
                );
                Complex::new(re, im)
            })
            .collect()
    }
    /// Total size of the serialised checkpoint in bytes.
    pub fn size_bytes(&self) -> usize {
        self.data.len()
    }
}
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------
/// Hash a byte slice using `DefaultHasher` seeded deterministically.
///
/// `DefaultHasher` does not expose a seed parameter, so the seed is fed into
/// the hasher ahead of the data to obtain different digests per seed.
fn hash_bytes_with_seed(data: &[u8], seed: u64) -> u64 {
    let mut hasher = DefaultHasher::new();
    // Hashing the (seed, data) tuple writes the seed first and the slice
    // second -- exactly equivalent to hashing the two components in sequence.
    (seed, data).hash(&mut hasher);
    hasher.finish()
}
/// Decompose a `Gate` into a discriminant byte, qubit indices, and f64
/// parameters. This is the single source of truth for the canonical encoding.
///
/// NOTE(review): the discriminant values feed directly into the persisted
/// circuit hash -- renumbering an existing variant silently invalidates every
/// stored [`ExecutionRecord`]. Only append new discriminants at the end.
fn gate_components(gate: &Gate) -> (u8, Vec<u32>, Vec<f64>) {
    match gate {
        // Parameter-free single-qubit gates: one qubit index, no params.
        Gate::H(q) => (0, vec![*q], vec![]),
        Gate::X(q) => (1, vec![*q], vec![]),
        Gate::Y(q) => (2, vec![*q], vec![]),
        Gate::Z(q) => (3, vec![*q], vec![]),
        Gate::S(q) => (4, vec![*q], vec![]),
        Gate::Sdg(q) => (5, vec![*q], vec![]),
        Gate::T(q) => (6, vec![*q], vec![]),
        Gate::Tdg(q) => (7, vec![*q], vec![]),
        // Rotations: one qubit index plus the rotation angle.
        Gate::Rx(q, angle) => (8, vec![*q], vec![*angle]),
        Gate::Ry(q, angle) => (9, vec![*q], vec![*angle]),
        Gate::Rz(q, angle) => (10, vec![*q], vec![*angle]),
        Gate::Phase(q, angle) => (11, vec![*q], vec![*angle]),
        // Two-qubit gates: both indices; only Rzz carries a parameter.
        Gate::CNOT(c, t) => (12, vec![*c, *t], vec![]),
        Gate::CZ(a, b) => (13, vec![*a, *b], vec![]),
        Gate::SWAP(a, b) => (14, vec![*a, *b], vec![]),
        Gate::Rzz(a, b, angle) => (15, vec![*a, *b], vec![*angle]),
        // Non-unitary / structural operations.
        Gate::Measure(q) => (16, vec![*q], vec![]),
        Gate::Reset(q) => (17, vec![*q], vec![]),
        Gate::Barrier => (18, vec![], vec![]),
        Gate::Unitary1Q(q, m) => {
            // Encode the 4 complex entries (8 f64 values).
            let params = vec![
                m[0][0].re, m[0][0].im, m[0][1].re, m[0][1].im, m[1][0].re, m[1][0].im, m[1][1].re,
                m[1][1].im,
            ];
            (19, vec![*q], params)
        }
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::circuit::QuantumCircuit;
    use crate::simulator::SimConfig;
    use crate::types::Complex;
    /// Same seed produces identical measurement results.
    #[test]
    fn same_seed_identical_results() {
        // Bell state + measurement: outcomes are random, so only a fixed
        // seed makes two runs comparable.
        let mut circuit = QuantumCircuit::new(2);
        circuit.h(0).cnot(0, 1).measure(0).measure(1);
        let config = SimConfig {
            seed: Some(42),
            noise: None,
            shots: None,
        };
        let r1 = Simulator::run_with_config(&circuit, &config).unwrap();
        let r2 = Simulator::run_with_config(&circuit, &config).unwrap();
        assert_eq!(r1.measurements.len(), r2.measurements.len());
        for (a, b) in r1.measurements.iter().zip(r2.measurements.iter()) {
            assert_eq!(a.qubit, b.qubit);
            assert_eq!(a.result, b.result);
            assert!((a.probability - b.probability).abs() < 1e-12);
        }
    }
    /// Different seeds produce different results (probabilistically; with
    /// measurements on a Bell state the chance of accidental agreement is
    /// non-zero but small over many runs).
    #[test]
    fn different_seed_different_results() {
        let mut circuit = QuantumCircuit::new(2);
        circuit.h(0).cnot(0, 1).measure(0).measure(1);
        let mut any_differ = false;
        // Try several seed pairs to reduce flakiness.
        for offset in 0..20 {
            let c1 = SimConfig {
                seed: Some(100 + offset),
                noise: None,
                shots: None,
            };
            let c2 = SimConfig {
                seed: Some(200 + offset),
                noise: None,
                shots: None,
            };
            let r1 = Simulator::run_with_config(&circuit, &c1).unwrap();
            let r2 = Simulator::run_with_config(&circuit, &c2).unwrap();
            // One differing outcome anywhere is enough to prove the seeds
            // actually influence sampling.
            if r1
                .measurements
                .iter()
                .zip(r2.measurements.iter())
                .any(|(a, b)| a.result != b.result)
            {
                any_differ = true;
                break;
            }
        }
        assert!(
            any_differ,
            "expected at least one pair of seeds to disagree"
        );
    }
    /// Record + replay round-trip succeeds.
    #[test]
    fn record_replay_roundtrip() {
        let mut circuit = QuantumCircuit::new(2);
        circuit.h(0).cnot(0, 1).measure(0).measure(1);
        let config = SimConfig {
            seed: Some(99),
            noise: None,
            shots: None,
        };
        let engine = ReplayEngine::new();
        let record = engine.record_execution(&circuit, &config, 1);
        assert!(engine.replay(&record, &circuit));
    }
    /// Circuit hash is deterministic: calling it twice yields the same value.
    #[test]
    fn circuit_hash_deterministic() {
        let mut circuit = QuantumCircuit::new(3);
        circuit.h(0).rx(1, 1.234).cnot(0, 2).measure(0);
        let h1 = ReplayEngine::circuit_hash(&circuit);
        let h2 = ReplayEngine::circuit_hash(&circuit);
        assert_eq!(h1, h2);
    }
    /// Two structurally different circuits produce different hashes.
    #[test]
    fn circuit_hash_differs_for_different_circuits() {
        // Circuits differ only in the first gate (H vs X).
        let mut c1 = QuantumCircuit::new(2);
        c1.h(0).cnot(0, 1);
        let mut c2 = QuantumCircuit::new(2);
        c2.x(0).cnot(0, 1);
        let h1 = ReplayEngine::circuit_hash(&c1);
        let h2 = ReplayEngine::circuit_hash(&c2);
        assert_ne!(h1, h2);
    }
    /// Checkpoint capture/restore preserves amplitudes exactly.
    #[test]
    fn checkpoint_capture_restore() {
        let amplitudes = vec![
            Complex::new(0.5, 0.5),
            Complex::new(-0.3, 0.1),
            Complex::new(0.0, -0.7),
            Complex::new(0.2, 0.0),
        ];
        let checkpoint = StateCheckpoint::capture(&amplitudes);
        let restored = checkpoint.restore();
        assert_eq!(amplitudes.len(), restored.len());
        // Byte round-trip through to_le_bytes/from_le_bytes is lossless, so
        // exact f64 equality is the right check here.
        for (orig, rest) in amplitudes.iter().zip(restored.iter()) {
            assert_eq!(orig.re, rest.re);
            assert_eq!(orig.im, rest.im);
        }
    }
    /// Checkpoint size is 16 bytes per amplitude (re: 8 + im: 8).
    #[test]
    fn checkpoint_size_bytes() {
        let amplitudes = vec![Complex::ZERO; 8];
        let checkpoint = StateCheckpoint::capture(&amplitudes);
        assert_eq!(checkpoint.size_bytes(), 8 * 16);
    }
    /// Replay fails if the circuit has been modified after recording.
    #[test]
    fn replay_fails_on_modified_circuit() {
        let mut circuit = QuantumCircuit::new(2);
        circuit.h(0).cnot(0, 1).measure(0).measure(1);
        let config = SimConfig {
            seed: Some(42),
            noise: None,
            shots: None,
        };
        let engine = ReplayEngine::new();
        let record = engine.record_execution(&circuit, &config, 1);
        // Modify the circuit.
        let mut modified = QuantumCircuit::new(2);
        modified.x(0).cnot(0, 1).measure(0).measure(1);
        assert!(!engine.replay(&record, &modified));
    }
    /// ExecutionRecord captures noise config when present.
    #[test]
    fn record_captures_noise() {
        let circuit = QuantumCircuit::new(1);
        let config = SimConfig {
            seed: Some(7),
            noise: Some(NoiseModel {
                depolarizing_rate: 0.01,
                bit_flip_rate: 0.005,
                phase_flip_rate: 0.002,
            }),
            shots: None,
        };
        let engine = ReplayEngine::new();
        let record = engine.record_execution(&circuit, &config, 100);
        let nc = record.noise_config.as_ref().unwrap();
        assert!((nc.depolarizing_rate - 0.01).abs() < 1e-15);
        assert!((nc.bit_flip_rate - 0.005).abs() < 1e-15);
        assert!((nc.phase_flip_rate - 0.002).abs() < 1e-15);
        assert_eq!(record.shots, 100);
        assert_eq!(record.seed, 7);
    }
    /// Empty circuit hashes deterministically and differently from non-empty.
    #[test]
    fn empty_circuit_hash() {
        let empty = QuantumCircuit::new(2);
        let mut non_empty = QuantumCircuit::new(2);
        non_empty.h(0);
        let h1 = ReplayEngine::circuit_hash(&empty);
        let h2 = ReplayEngine::circuit_hash(&non_empty);
        assert_ne!(h1, h2);
        // Determinism.
        assert_eq!(h1, ReplayEngine::circuit_hash(&empty));
    }
    /// Rotation angle differences produce different hashes.
    #[test]
    fn rotation_angle_changes_hash() {
        // Even a tiny angle delta changes the canonical f64 bytes, so the
        // hashes must differ.
        let mut c1 = QuantumCircuit::new(1);
        c1.rx(0, 1.0);
        let mut c2 = QuantumCircuit::new(1);
        c2.rx(0, 1.0001);
        assert_ne!(
            ReplayEngine::circuit_hash(&c1),
            ReplayEngine::circuit_hash(&c2)
        );
    }
}

View File

@@ -0,0 +1,445 @@
//! SIMD-accelerated and parallel gate kernels for the state-vector engine.
//!
//! Provides optimised implementations of single-qubit and two-qubit gate
//! application using platform SIMD intrinsics (AVX2 on x86_64) and optional
//! rayon-based parallelism behind the `parallel` feature flag.
//!
//! The [`apply_single_qubit_gate_best`] and [`apply_two_qubit_gate_best`]
//! dispatch functions automatically select the fastest available kernel.
use crate::types::Complex;
// ---------------------------------------------------------------------------
// Conditional imports
// ---------------------------------------------------------------------------
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
use std::arch::x86_64::*;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
/// Threshold: only spawn rayon threads when the amplitude vector has at least
/// this many elements (corresponds to 16 qubits = 65 536 amplitudes).
/// Below this the `*_parallel` kernels fall back to the scalar path because
/// thread-dispatch overhead would exceed the per-gate arithmetic.
#[cfg(feature = "parallel")]
const PARALLEL_THRESHOLD: usize = 65_536;
// =========================================================================
// Scalar fallback kernels
// =========================================================================
/// Apply a 2x2 unitary to `qubit` using the standard butterfly loop.
///
/// Baseline scalar implementation: used on architectures without a
/// specialised SIMD path and as the fallback when the `simd` feature is
/// disabled.
#[inline]
pub fn apply_single_qubit_gate_scalar(
    amplitudes: &mut [Complex],
    qubit: u32,
    matrix: &[[Complex; 2]; 2],
) {
    // Amplitude pairs (lo, lo + step) differ only in the target qubit's bit.
    let step = 1usize << qubit;
    // Walk complete butterfly blocks of size 2 * step.
    for block_start in (0..amplitudes.len()).step_by(step << 1) {
        for lo in block_start..block_start + step {
            let hi = lo + step;
            let (a, b) = (amplitudes[lo], amplitudes[hi]);
            amplitudes[lo] = matrix[0][0] * a + matrix[0][1] * b;
            amplitudes[hi] = matrix[1][0] * a + matrix[1][1] * b;
        }
    }
}
/// Apply a 4x4 unitary to qubit pair (`q1`, `q2`) using scalar arithmetic.
#[inline]
pub fn apply_two_qubit_gate_scalar(
    amplitudes: &mut [Complex],
    q1: u32,
    q2: u32,
    matrix: &[[Complex; 4]; 4],
) {
    let bit1 = 1usize << q1;
    let bit2 = 1usize << q2;
    // Every base index with both target bits clear addresses one independent
    // 4-amplitude group.
    for base in 0..amplitudes.len() {
        if base & (bit1 | bit2) != 0 {
            continue;
        }
        // Group ordering matches the matrix convention: q2 is the low bit.
        let idxs = [base, base | bit2, base | bit1, base | bit1 | bit2];
        let vals = [
            amplitudes[idxs[0]],
            amplitudes[idxs[1]],
            amplitudes[idxs[2]],
            amplitudes[idxs[3]],
        ];
        for (row, &k) in idxs.iter().enumerate() {
            amplitudes[k] = matrix[row][0] * vals[0]
                + matrix[row][1] * vals[1]
                + matrix[row][2] * vals[2]
                + matrix[row][3] * vals[3];
        }
    }
}
// =========================================================================
// x86_64 SIMD kernels (AVX2)
// =========================================================================
/// Apply a single-qubit gate using AVX2 intrinsics.
///
/// Packs two complex numbers (4 f64 values) into a single `__m256d` register
/// and performs the butterfly multiply-add with SIMD parallelism. When the
/// `fma` target feature is available at compile time, fused multiply-add
/// instructions are used for improved throughput and precision.
///
/// # Safety
///
/// Requires the `avx2` target feature. The function is gated behind
/// `#[target_feature(enable = "avx2")]` and `is_x86_feature_detected!`
/// is checked at the dispatch site.
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
#[target_feature(enable = "avx2")]
pub unsafe fn apply_single_qubit_gate_simd(
    amplitudes: &mut [Complex],
    qubit: u32,
    matrix: &[[Complex; 2]; 2],
) {
    let step = 1usize << qubit;
    let n = amplitudes.len();
    // Pre-broadcast matrix elements into AVX registers.
    // Each complex multiplication (a+bi)(c+di) = (ac-bd) + (ad+bc)i
    // We store real and imaginary parts in separate broadcast vectors.
    let m00_re = _mm256_set1_pd(matrix[0][0].re);
    let m00_im = _mm256_set1_pd(matrix[0][0].im);
    let m01_re = _mm256_set1_pd(matrix[0][1].re);
    let m01_im = _mm256_set1_pd(matrix[0][1].im);
    let m10_re = _mm256_set1_pd(matrix[1][0].re);
    let m10_im = _mm256_set1_pd(matrix[1][0].im);
    let m11_re = _mm256_set1_pd(matrix[1][1].re);
    let m11_im = _mm256_set1_pd(matrix[1][1].im);
    // Sign mask for negating imaginary parts during complex multiplication:
    // complex mul: re_out = a_re*b_re - a_im*b_im
    //              im_out = a_re*b_im + a_im*b_re
    // We use the pattern: load [re, im, re, im], shuffle, negate, add.
    let neg_mask = _mm256_set_pd(-1.0, 1.0, -1.0, 1.0);
    // Process two complex pairs at a time when step >= 2, else fall back.
    if step >= 2 {
        let mut block_start = 0;
        while block_start < n {
            // Process pairs within this butterfly block.
            let mut i = block_start;
            while i + 1 < block_start + step {
                let j = i + step;
                // Load two complex values from position i: [re0, im0, re1, im1]
                // NOTE(review): this reinterprets `Complex` as raw f64 pairs;
                // assumes the struct is laid out as (re, im) with no padding
                // -- confirm `Complex` is `#[repr(C)]` in crate::types.
                // Unaligned loads (loadu) are used, so no alignment is assumed.
                let a_vec = _mm256_loadu_pd(&amplitudes[i] as *const Complex as *const f64);
                // Load two complex values from position j
                let b_vec = _mm256_loadu_pd(&amplitudes[j] as *const Complex as *const f64);
                // Compute matrix[0][0] * a + matrix[0][1] * b for the i-slot
                let out_i =
                    complex_mul_add_avx2(a_vec, m00_re, m00_im, b_vec, m01_re, m01_im, neg_mask);
                // Compute matrix[1][0] * a + matrix[1][1] * b for the j-slot
                let out_j =
                    complex_mul_add_avx2(a_vec, m10_re, m10_im, b_vec, m11_re, m11_im, neg_mask);
                _mm256_storeu_pd(&mut amplitudes[i] as *mut Complex as *mut f64, out_i);
                _mm256_storeu_pd(&mut amplitudes[j] as *mut Complex as *mut f64, out_j);
                i += 2;
            }
            // Handle the last element if step is odd (rare but correct).
            if step & 1 != 0 {
                let i = block_start + step - 1;
                let j = i + step;
                let a = amplitudes[i];
                let b = amplitudes[j];
                amplitudes[i] = matrix[0][0] * a + matrix[0][1] * b;
                amplitudes[j] = matrix[1][0] * a + matrix[1][1] * b;
            }
            block_start += step << 1;
        }
    } else {
        // step == 1 (qubit 0): each butterfly is a single pair, no SIMD
        // packing benefit on the inner loop. Use scalar.
        apply_single_qubit_gate_scalar(amplitudes, qubit, matrix);
    }
}
/// Compute `m_a * a_vec + m_b * b_vec` where each operand represents two
/// packed complex numbers and `m_a`, `m_b` are broadcast complex scalars
/// given as separate real/imag broadcast registers.
///
/// # Layout
///
/// Each `__m256d` holds `[re0, im0, re1, im1]` -- two complex numbers.
/// The multiplication `(mr + mi*i) * (re + im*i)` expands to:
///   real_part = mr*re - mi*im
///   imag_part = mr*im + mi*re
///
/// # Safety
///
/// Caller must ensure AVX2 is available.
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
#[target_feature(enable = "avx2")]
#[inline]
unsafe fn complex_mul_add_avx2(
    a: __m256d,
    ma_re: __m256d,
    ma_im: __m256d,
    b: __m256d,
    mb_re: __m256d,
    mb_im: __m256d,
    neg_mask: __m256d,
) -> __m256d {
    // Complex multiply: m_a * a
    // a = [a0_re, a0_im, a1_re, a1_im]
    // Shuffle to get [a0_im, a0_re, a1_im, a1_re]
    // (permute_pd control 0b0101 swaps the two lanes within each 128-bit half)
    let a_swap = _mm256_permute_pd(a, 0b0101);
    // ma_re * a = [ma_re*a0_re, ma_re*a0_im, ma_re*a1_re, ma_re*a1_im]
    let prod_a_re = _mm256_mul_pd(ma_re, a);
    // ma_im * a_swap = [ma_im*a0_im, ma_im*a0_re, ma_im*a1_im, ma_im*a1_re]
    let prod_a_im = _mm256_mul_pd(ma_im, a_swap);
    // Apply sign: negate where needed to get (re, im) correct
    // neg_mask = [-1, 1, -1, 1] so this gives:
    // [-ma_im*a0_im, ma_im*a0_re, -ma_im*a1_im, ma_im*a1_re]
    let prod_a_im_signed = _mm256_mul_pd(prod_a_im, neg_mask);
    // Sum: [ma_re*a0_re - ma_im*a0_im, ma_re*a0_im + ma_im*a0_re, ...]
    let result_a = _mm256_add_pd(prod_a_re, prod_a_im_signed);
    // Complex multiply: m_b * b (same pattern)
    let b_swap = _mm256_permute_pd(b, 0b0101);
    let prod_b_re = _mm256_mul_pd(mb_re, b);
    let prod_b_im = _mm256_mul_pd(mb_im, b_swap);
    let prod_b_im_signed = _mm256_mul_pd(prod_b_im, neg_mask);
    let result_b = _mm256_add_pd(prod_b_re, prod_b_im_signed);
    // Final sum: m_a * a + m_b * b
    _mm256_add_pd(result_a, result_b)
}
/// Apply a two-qubit gate with SIMD assistance.
///
/// The two-qubit butterfly accesses four non-contiguous amplitude indices per
/// group, which makes manual SIMD vectorisation via gather/scatter slower than
/// letting LLVM auto-vectorise the scalar loop (gather throughput on current
/// x86_64 microarchitectures is poor). This function therefore delegates to
/// the scalar kernel, which LLVM will auto-vectorise when compiling with
/// `-C target-cpu=native`.
///
/// The single-qubit kernel is the primary beneficiary of manual AVX2
/// vectorisation because its butterfly pairs are contiguous in memory.
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
pub fn apply_two_qubit_gate_simd(
    amplitudes: &mut [Complex],
    q1: u32,
    q2: u32,
    matrix: &[[Complex; 4]; 4],
) {
    // Deliberate delegation -- see doc comment above for the rationale.
    apply_two_qubit_gate_scalar(amplitudes, q1, q2, matrix);
}
// =========================================================================
// Parallel kernels (rayon)
// =========================================================================
/// Apply a single-qubit gate using rayon parallel iteration.
///
/// The amplitude array is split into chunks that each contain complete
/// butterfly blocks (pairs of indices separated by `step = 2^qubit`), and
/// every chunk is processed independently on the rayon pool.
///
/// States smaller than [`PARALLEL_THRESHOLD`] (65 536 amplitudes / 16 qubits)
/// are handled by the scalar kernel instead: thread dispatch would cost more
/// than the arithmetic it saves.
#[cfg(feature = "parallel")]
pub fn apply_single_qubit_gate_parallel(
    amplitudes: &mut [Complex],
    qubit: u32,
    matrix: &[[Complex; 2]; 2],
) {
    // Not worth parallelising for small states.
    if amplitudes.len() < PARALLEL_THRESHOLD {
        apply_single_qubit_gate_scalar(amplitudes, qubit, matrix);
        return;
    }
    let step = 1usize << qubit;
    // One complete butterfly block spans 2 * step amplitudes.
    let block_size = step << 1;
    // Chunks must hold whole blocks and be big enough to amortise rayon's
    // scheduling overhead: round max(4096, block_size) up to a block multiple.
    let min_chunk = 4096.max(block_size);
    let chunk_size = ((min_chunk + block_size - 1) / block_size) * block_size;
    // Copy the matrix by value so the closure owns it (Send).
    let m = *matrix;
    amplitudes.par_chunks_mut(chunk_size).for_each(|chunk| {
        let mut start = 0;
        // Only whole blocks are processed; chunk sizes are always multiples
        // of block_size, so no partial block is ever left behind.
        while start + block_size <= chunk.len() {
            for lo in start..start + step {
                let hi = lo + step;
                let (a, b) = (chunk[lo], chunk[hi]);
                chunk[lo] = m[0][0] * a + m[0][1] * b;
                chunk[hi] = m[1][0] * a + m[1][1] * b;
            }
            start += block_size;
        }
    });
}
/// Apply a two-qubit gate using rayon parallel iteration.
///
/// Parallelises over groups of base indices. Each thread processes a range of
/// base addresses and applies the 4x4 matrix to the four corresponding
/// amplitude slots.
///
/// Falls back to scalar for states smaller than [`PARALLEL_THRESHOLD`].
#[cfg(feature = "parallel")]
pub fn apply_two_qubit_gate_parallel(
    amplitudes: &mut [Complex],
    q1: u32,
    q2: u32,
    matrix: &[[Complex; 4]; 4],
) {
    let n = amplitudes.len();
    if n < PARALLEL_THRESHOLD {
        apply_two_qubit_gate_scalar(amplitudes, q1, q2, matrix);
        return;
    }
    let q1_bit = 1usize << q1;
    let q2_bit = 1usize << q2;
    // Copy the matrix by value so the rayon closure is Send.
    let m = *matrix;
    // We cannot use par_chunks_mut because the four indices per group are
    // non-contiguous. Instead, collect all valid base indices and process
    // them in parallel via an unsafe split.
    //
    // Safety: each base index produces four distinct target indices, and no
    // two valid base indices share any target index. Therefore the writes
    // are disjoint and parallel mutation is safe.
    let bases: Vec<usize> = (0..n)
        .filter(|&base| base & q1_bit == 0 && base & q2_bit == 0)
        .collect();
    // Safety: the disjoint index property guarantees no data races. Each
    // base produces indices {base, base|q2_bit, base|q1_bit,
    // base|q1_bit|q2_bit} and these sets are pairwise disjoint across
    // different valid bases.
    //
    // We transmit the pointer as a usize to satisfy Send+Sync bounds,
    // then reconstruct it inside each parallel closure.
    let amp_addr = amplitudes.as_mut_ptr() as usize;
    bases.par_iter().for_each(move |&base| {
        // Safety: amp_addr was derived from a valid &mut [Complex] and the
        // disjoint index invariant prevents data races. The borrow of
        // `amplitudes` outlives the par_iter scope, so the pointer is valid
        // for the entire parallel region.
        unsafe {
            let ptr = amp_addr as *mut Complex;
            let idxs = [base, base | q2_bit, base | q1_bit, base | q1_bit | q2_bit];
            // Read all four amplitudes before any write (in-place butterfly).
            let vals = [
                *ptr.add(idxs[0]),
                *ptr.add(idxs[1]),
                *ptr.add(idxs[2]),
                *ptr.add(idxs[3]),
            ];
            for r in 0..4 {
                *ptr.add(idxs[r]) =
                    m[r][0] * vals[0] + m[r][1] * vals[1] + m[r][2] * vals[2] + m[r][3] * vals[3];
            }
        }
    });
}
// =========================================================================
// Dispatch functions
// =========================================================================
/// Apply a single-qubit gate using the best available kernel.
///
/// Selection order (as implemented in this dispatcher):
/// 1. **Parallel** -- `parallel` feature enabled and the state has at least
///    [`PARALLEL_THRESHOLD`] amplitudes (65 536 / 16 qubits); below that,
///    thread dispatch overhead dominates and the parallel path is skipped.
/// 2. **SIMD** -- `simd` feature enabled on x86_64 and AVX2 detected at runtime.
/// 3. **Scalar fallback** -- always available.
pub fn apply_single_qubit_gate_best(
    amplitudes: &mut [Complex],
    qubit: u32,
    matrix: &[[Complex; 2]; 2],
) {
    // Large states: prefer parallel when available.
    #[cfg(feature = "parallel")]
    {
        if amplitudes.len() >= PARALLEL_THRESHOLD {
            apply_single_qubit_gate_parallel(amplitudes, qubit, matrix);
            return;
        }
    }
    // Medium/small states: try SIMD.
    #[cfg(all(target_arch = "x86_64", feature = "simd"))]
    {
        if is_x86_feature_detected!("avx2") {
            // Safety: AVX2 availability is checked by the runtime detection
            // macro above.
            unsafe {
                apply_single_qubit_gate_simd(amplitudes, qubit, matrix);
            }
            return;
        }
    }
    // Scalar fallback.
    apply_single_qubit_gate_scalar(amplitudes, qubit, matrix);
}
/// Apply a two-qubit gate using the best available kernel.
///
/// Selection mirrors [`apply_single_qubit_gate_best`]: parallel for large
/// states (>= [`PARALLEL_THRESHOLD`] amplitudes), otherwise scalar.
pub fn apply_two_qubit_gate_best(
    amplitudes: &mut [Complex],
    q1: u32,
    q2: u32,
    matrix: &[[Complex; 4]; 4],
) {
    #[cfg(feature = "parallel")]
    {
        if amplitudes.len() >= PARALLEL_THRESHOLD {
            apply_two_qubit_gate_parallel(amplitudes, q1, q2, matrix);
            return;
        }
    }
    // The two-qubit SIMD kernel delegates to scalar (see apply_two_qubit_gate_simd
    // doc comment for rationale), so we always use the scalar path here.
    apply_two_qubit_gate_scalar(amplitudes, q1, q2, matrix);
}

View File

@@ -0,0 +1,221 @@
//! High-level simulator that executes quantum circuits
use crate::circuit::QuantumCircuit;
use crate::error::Result;
use crate::gate::Gate;
use crate::state::QuantumState;
use crate::types::*;
use rand::Rng;
use std::collections::HashMap;
use std::time::Instant;
/// Configuration for a simulation run.
///
/// The derived `Default` leaves every field `None`: OS-entropy seed,
/// no noise, single run. This is identical to the previous hand-written
/// `Default` impl, which is now derived to avoid boilerplate.
#[derive(Default)]
pub struct SimConfig {
    /// Deterministic seed. `None` uses OS entropy.
    pub seed: Option<u64>,
    /// Optional noise model applied after every gate.
    pub noise: Option<NoiseModel>,
    /// Number of repeated shots (`None` = single run returning state).
    pub shots: Option<u32>,
}
/// Result of a single simulation run (state + measurements).
pub struct SimulationResult {
    /// Final state-vector after all gates have been applied.
    pub state: QuantumState,
    /// Outcome of every `Measure` gate, in execution order.
    pub measurements: Vec<MeasurementOutcome>,
    /// Timing and resource statistics for the run.
    pub metrics: SimulationMetrics,
}
/// Result of a multi-shot simulation (histogram of outcomes).
pub struct ShotResult {
    /// Map from measured bit-vector (indexed by qubit) to occurrence count.
    pub counts: HashMap<Vec<bool>, usize>,
    /// Aggregate statistics over all shots.
    pub metrics: SimulationMetrics,
}
/// Stateless simulator entry-point.
///
/// All functionality is exposed as associated functions; no instance
/// state is kept between runs.
pub struct Simulator;
impl Simulator {
    /// Run a circuit once with default configuration.
    ///
    /// Equivalent to [`Simulator::run_with_config`] with
    /// [`SimConfig::default`]: OS-entropy seed, no noise.
    pub fn run(circuit: &QuantumCircuit) -> Result<SimulationResult> {
        Self::run_with_config(circuit, &SimConfig::default())
    }

    /// Run a circuit once with explicit configuration.
    ///
    /// Applies every gate in program order to a fresh state, recording
    /// measurement outcomes as they occur. When `config.noise` is set, a
    /// stochastic noise channel is sampled after each gate.
    ///
    /// Note: `config.shots` is not consulted here; use
    /// [`Simulator::run_shots`] for multi-shot execution.
    pub fn run_with_config(
        circuit: &QuantumCircuit,
        config: &SimConfig,
    ) -> Result<SimulationResult> {
        let start = Instant::now();
        // Seeded state for reproducibility when requested, OS entropy otherwise.
        let mut state = match config.seed {
            Some(seed) => QuantumState::new_with_seed(circuit.num_qubits(), seed)?,
            None => QuantumState::new(circuit.num_qubits())?,
        };
        let mut measurements = Vec::new();
        let mut gate_count: usize = 0;
        for gate in circuit.gates() {
            let outcomes = state.apply_gate(gate)?;
            measurements.extend(outcomes);
            // Only unitary gates count toward the gate metric.
            if !gate.is_non_unitary() {
                gate_count += 1;
            }
            // Apply noise channel after each gate when a model is provided.
            if let Some(ref noise) = config.noise {
                apply_noise(&mut state, gate, noise);
            }
        }
        let elapsed = start.elapsed();
        let metrics = SimulationMetrics {
            num_qubits: circuit.num_qubits(),
            gate_count,
            execution_time_ns: elapsed.as_nanos() as u64,
            peak_memory_bytes: QuantumState::estimate_memory(circuit.num_qubits()),
            // Guard against a zero-duration clock reading.
            gates_per_second: if elapsed.as_secs_f64() > 0.0 {
                gate_count as f64 / elapsed.as_secs_f64()
            } else {
                0.0
            },
            gates_fused: 0,
        };
        Ok(SimulationResult {
            state,
            measurements,
            metrics,
        })
    }

    /// Run a circuit `shots` times, collecting a histogram of measurement outcomes.
    ///
    /// If the circuit contains no `Measure` gates, all qubits are measured
    /// automatically at the end of each shot.
    ///
    /// Each shot runs with seed `base_seed + shot`, so results are fully
    /// deterministic for a given `seed`. NOTE(review): when `seed` is
    /// `None` a fixed fallback (42) is used, so unseeded multi-shot runs
    /// are also deterministic -- unlike `run`, where `None` means OS
    /// entropy. Confirm this asymmetry is intended.
    pub fn run_shots(
        circuit: &QuantumCircuit,
        shots: u32,
        seed: Option<u64>,
    ) -> Result<ShotResult> {
        let start = Instant::now();
        let mut counts: HashMap<Vec<bool>, usize> = HashMap::new();
        let base_seed = seed.unwrap_or(42);
        let mut total_gates: usize = 0;
        let n_qubits = circuit.num_qubits();
        // Detect explicit measurements once, outside the shot loop.
        let has_measurements = circuit
            .gates()
            .iter()
            .any(|g| matches!(g, Gate::Measure(_)));
        for shot in 0..shots {
            // Derive a distinct, reproducible seed per shot.
            let config = SimConfig {
                seed: Some(base_seed.wrapping_add(shot as u64)),
                noise: None,
                shots: None,
            };
            let mut result = Self::run_with_config(circuit, &config)?;
            total_gates += result.metrics.gate_count;
            // Implicit measurement when the circuit has none.
            if !has_measurements {
                let outcomes = result.state.measure_all()?;
                result.measurements.extend(outcomes);
            }
            // Build a bit-vector keyed by qubit index. Unmeasured qubits
            // stay false; repeated measurements of a qubit keep the last.
            let mut bits = vec![false; n_qubits as usize];
            for m in &result.measurements {
                if (m.qubit as usize) < bits.len() {
                    bits[m.qubit as usize] = m.result;
                }
            }
            *counts.entry(bits).or_insert(0) += 1;
        }
        let elapsed = start.elapsed();
        // Metrics aggregated over all shots.
        let metrics = SimulationMetrics {
            num_qubits: n_qubits,
            gate_count: total_gates,
            execution_time_ns: elapsed.as_nanos() as u64,
            peak_memory_bytes: QuantumState::estimate_memory(n_qubits),
            gates_per_second: if elapsed.as_secs_f64() > 0.0 {
                total_gates as f64 / elapsed.as_secs_f64()
            } else {
                0.0
            },
            gates_fused: 0,
        };
        Ok(ShotResult { counts, metrics })
    }
}
// ---------------------------------------------------------------------------
// Noise channel
// ---------------------------------------------------------------------------
/// Apply a stochastic noise channel to the state after a gate.
///
/// For each qubit the gate acts on, three independent channels are
/// sampled in this fixed order:
/// - depolarising: with probability `depolarizing_rate`, apply a uniformly
///   random Pauli (X, Y, or Z, each with probability 1/3);
/// - bit flip: with probability `bit_flip_rate`, apply X;
/// - phase flip: with probability `phase_flip_rate`, apply Z.
///
/// A random number is drawn only when the corresponding rate is non-zero,
/// keeping the RNG stream identical for disabled channels.
fn apply_noise(state: &mut QuantumState, gate: &Gate, noise: &NoiseModel) {
    for &qubit in gate.qubits().iter() {
        // Depolarising channel: the `&&` short-circuit ensures the RNG is
        // only consulted when the rate is positive.
        if noise.depolarizing_rate > 0.0 && state.rng_mut().gen::<f64>() < noise.depolarizing_rate {
            let pick: f64 = state.rng_mut().gen();
            let pauli = if pick < 1.0 / 3.0 {
                Gate::X(qubit)
            } else if pick < 2.0 / 3.0 {
                Gate::Y(qubit)
            } else {
                Gate::Z(qubit)
            };
            if let Some(m) = pauli.matrix_1q() {
                state.apply_single_qubit_gate(qubit, &m);
            }
        }
        // Bit-flip channel.
        if noise.bit_flip_rate > 0.0 && state.rng_mut().gen::<f64>() < noise.bit_flip_rate {
            let m = Gate::X(qubit).matrix_1q().unwrap();
            state.apply_single_qubit_gate(qubit, &m);
        }
        // Phase-flip channel.
        if noise.phase_flip_rate > 0.0 && state.rng_mut().gen::<f64>() < noise.phase_flip_rate {
            let m = Gate::Z(qubit).matrix_1q().unwrap();
            state.apply_single_qubit_gate(qubit, &m);
        }
    }
}

View File

@@ -0,0 +1,789 @@
//! Aaronson-Gottesman stabilizer simulator for Clifford circuits.
//!
//! Uses a tableau of 2n rows and (2n+1) columns to represent the stabilizer
//! and destabilizer generators of an n-qubit state. Each Clifford gate is
//! applied in O(n) time and each measurement in O(n^2), enabling simulation
//! of millions of qubits for circuits composed entirely of Clifford gates.
//!
//! Reference: Aaronson & Gottesman, "Improved Simulation of Stabilizer
//! Circuits", Phys. Rev. A 70, 052328 (2004).
use crate::error::{QuantumError, Result};
use crate::gate::Gate;
use crate::types::MeasurementOutcome;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
/// Stabilizer state for efficient Clifford circuit simulation.
///
/// Uses the Aaronson-Gottesman tableau representation to simulate
/// Clifford circuits in O(n^2) time per gate, enabling simulation
/// of millions of qubits.
pub struct StabilizerState {
    num_qubits: usize,
    /// Tableau: 2n rows, each row has n X-bits, n Z-bits, and 1 phase bit.
    /// Stored as a flat `Vec<bool>` for simplicity.
    /// Row i occupies indices `[i * stride .. (i+1) * stride)`.
    /// Layout within a row: `x[0..n], z[0..n], r` (total width = 2n + 1).
    /// Rows 0..n hold the destabilizer generators, rows n..2n the stabilizers.
    tableau: Vec<bool>,
    /// RNG that decides random measurement outcomes.
    rng: StdRng,
    /// Every measurement outcome produced so far, in order.
    measurement_record: Vec<MeasurementOutcome>,
}
impl StabilizerState {
    // -----------------------------------------------------------------------
    // Construction
    // -----------------------------------------------------------------------

    /// Create a new stabilizer state representing |00...0>.
    ///
    /// The initial tableau has destabilizer i = X_i, stabilizer i = Z_i,
    /// and all phase bits set to 0. Uses a fixed RNG seed of 0; see
    /// [`StabilizerState::new_with_seed`] for explicit seeding.
    pub fn new(num_qubits: usize) -> Result<Self> {
        Self::new_with_seed(num_qubits, 0)
    }

    /// Create a new stabilizer state with a specific RNG seed.
    ///
    /// # Errors
    /// Returns a `CircuitError` when `num_qubits` is 0.
    pub fn new_with_seed(num_qubits: usize, seed: u64) -> Result<Self> {
        if num_qubits == 0 {
            return Err(QuantumError::CircuitError(
                "stabilizer state requires at least 1 qubit".into(),
            ));
        }
        let n = num_qubits;
        let stride = 2 * n + 1;
        let total = 2 * n * stride;
        let mut tableau = vec![false; total];
        // Destabilizer i (row i): X_i => x[i]=1, rest zero
        for i in 0..n {
            tableau[i * stride + i] = true; // x bit for qubit i
        }
        // Stabilizer i (row n+i): Z_i => z[i]=1, rest zero
        for i in 0..n {
            tableau[(n + i) * stride + n + i] = true; // z bit for qubit i
        }
        Ok(Self {
            num_qubits,
            tableau,
            rng: StdRng::seed_from_u64(seed),
            measurement_record: Vec::new(),
        })
    }

    // -----------------------------------------------------------------------
    // Tableau access helpers
    // -----------------------------------------------------------------------

    /// Width of one tableau row: n X-bits + n Z-bits + 1 phase bit.
    #[inline]
    fn stride(&self) -> usize {
        2 * self.num_qubits + 1
    }

    /// Get the X bit for `(row, col)`.
    #[inline]
    fn x(&self, row: usize, col: usize) -> bool {
        self.tableau[row * self.stride() + col]
    }

    /// Get the Z bit for `(row, col)`.
    #[inline]
    fn z(&self, row: usize, col: usize) -> bool {
        self.tableau[row * self.stride() + self.num_qubits + col]
    }

    /// Get the phase bit for `row`.
    #[inline]
    fn r(&self, row: usize) -> bool {
        self.tableau[row * self.stride() + 2 * self.num_qubits]
    }

    #[inline]
    fn set_x(&mut self, row: usize, col: usize, val: bool) {
        let idx = row * self.stride() + col;
        self.tableau[idx] = val;
    }

    #[inline]
    fn set_z(&mut self, row: usize, col: usize, val: bool) {
        let idx = row * self.stride() + self.num_qubits + col;
        self.tableau[idx] = val;
    }

    #[inline]
    fn set_r(&mut self, row: usize, val: bool) {
        let idx = row * self.stride() + 2 * self.num_qubits;
        self.tableau[idx] = val;
    }

    /// Multiply row `target` by row `source` (left-multiply the Pauli string
    /// of `target` by that of `source`), updating the phase of `target`.
    ///
    /// Uses the `g` function to accumulate the phase contribution from
    /// each qubit position.
    fn row_mult(&mut self, target: usize, source: usize) {
        let n = self.num_qubits;
        let mut phase_sum: i32 = 0;
        // Accumulate phase from commutation relations
        for j in 0..n {
            phase_sum += g(
                self.x(source, j),
                self.z(source, j),
                self.x(target, j),
                self.z(target, j),
            );
        }
        // Combine phases: new_r = (2*r_target + 2*r_source + phase_sum) mod 4
        // r=1 means phase -1 (i.e. factor of i^2 = -1), so we work mod 4 in
        // units of i. r_bit maps to 0 or 2.
        let total = 2 * (self.r(target) as i32) + 2 * (self.r(source) as i32) + phase_sum;
        // Result phase bit: total mod 4 == 2 => r=1, else r=0
        let new_r = ((total % 4) + 4) % 4 == 2;
        self.set_r(target, new_r);
        // XOR the X and Z bits (Pauli multiplication on the bit encoding).
        let stride = self.stride();
        for j in 0..n {
            let sx = self.tableau[source * stride + j];
            self.tableau[target * stride + j] ^= sx;
        }
        for j in 0..n {
            let sz = self.tableau[source * stride + n + j];
            self.tableau[target * stride + n + j] ^= sz;
        }
    }

    // -----------------------------------------------------------------------
    // Clifford gate operations
    // -----------------------------------------------------------------------

    /// Apply a Hadamard gate on `qubit`.
    ///
    /// Conjugation rules: H X H = Z, H Z H = X, H Y H = -Y.
    /// Tableau update: swap X and Z columns for this qubit in every row,
    /// and flip the phase bit where both X and Z were set (Y -> -Y).
    pub fn hadamard(&mut self, qubit: usize) {
        let n = self.num_qubits;
        for i in 0..(2 * n) {
            let xi = self.x(i, qubit);
            let zi = self.z(i, qubit);
            // phase flip for Y entries: if both x and z are set
            if xi && zi {
                self.set_r(i, !self.r(i));
            }
            // swap x and z
            self.set_x(i, qubit, zi);
            self.set_z(i, qubit, xi);
        }
    }

    /// Apply the phase gate (S gate) on `qubit`.
    ///
    /// Conjugation rules: S X S^dag = Y, S Z S^dag = Z, S Y S^dag = -X.
    /// Tableau update: Z_j -> Z_j XOR X_j, phase flipped where X and Z
    /// are both set.
    pub fn phase_gate(&mut self, qubit: usize) {
        let n = self.num_qubits;
        for i in 0..(2 * n) {
            let xi = self.x(i, qubit);
            let zi = self.z(i, qubit);
            // Phase update: r ^= (x AND z)
            if xi && zi {
                self.set_r(i, !self.r(i));
            }
            // z -> z XOR x
            self.set_z(i, qubit, zi ^ xi);
        }
    }

    /// Apply a CNOT gate with `control` and `target`.
    ///
    /// Conjugation rules:
    ///   X_c -> X_c X_t, Z_t -> Z_c Z_t,
    ///   X_t -> X_t, Z_c -> Z_c.
    /// Tableau update for every row:
    ///   phase ^= x_c AND z_t AND (x_t XOR z_c XOR 1)
    ///   x_t ^= x_c
    ///   z_c ^= z_t
    pub fn cnot(&mut self, control: usize, target: usize) {
        let n = self.num_qubits;
        for i in 0..(2 * n) {
            let xc = self.x(i, control);
            let zt = self.z(i, target);
            let xt = self.x(i, target);
            let zc = self.z(i, control);
            // Phase update: (xt == zc) is the (x_t XOR z_c XOR 1) term.
            if xc && zt && (xt == zc) {
                self.set_r(i, !self.r(i));
            }
            // x_target ^= x_control
            self.set_x(i, target, xt ^ xc);
            // z_control ^= z_target
            self.set_z(i, control, zc ^ zt);
        }
    }

    /// Apply a Pauli-X gate on `qubit`.
    ///
    /// Conjugation: X commutes with X, anticommutes with Z and Y.
    /// Tableau update: flip phase where Z bit is set for this qubit.
    pub fn x_gate(&mut self, qubit: usize) {
        let n = self.num_qubits;
        for i in 0..(2 * n) {
            if self.z(i, qubit) {
                self.set_r(i, !self.r(i));
            }
        }
    }

    /// Apply a Pauli-Y gate on `qubit`.
    ///
    /// Conjugation: Y anticommutes with both X and Z.
    /// Tableau update: flip phase where exactly one of X, Z is set.
    pub fn y_gate(&mut self, qubit: usize) {
        let n = self.num_qubits;
        for i in 0..(2 * n) {
            let xi = self.x(i, qubit);
            let zi = self.z(i, qubit);
            // Y anticommutes with X and Z, commutes with Y and I:
            // phase flips when exactly one of x,z is set (X or Z, not Y or I)
            if xi ^ zi {
                self.set_r(i, !self.r(i));
            }
        }
    }

    /// Apply a Pauli-Z gate on `qubit`.
    ///
    /// Conjugation: Z commutes with Z, anticommutes with X and Y.
    /// Tableau update: flip phase where X bit is set for this qubit.
    pub fn z_gate(&mut self, qubit: usize) {
        let n = self.num_qubits;
        for i in 0..(2 * n) {
            if self.x(i, qubit) {
                self.set_r(i, !self.r(i));
            }
        }
    }

    /// Apply a CZ (controlled-Z) gate on `q1` and `q2`.
    ///
    /// CZ = (I x H) . CNOT . (I x H). Implemented by decomposition.
    pub fn cz(&mut self, q1: usize, q2: usize) {
        self.hadamard(q2);
        self.cnot(q1, q2);
        self.hadamard(q2);
    }

    /// Apply a SWAP gate on `q1` and `q2`.
    ///
    /// SWAP = CNOT(q1,q2) . CNOT(q2,q1) . CNOT(q1,q2).
    pub fn swap(&mut self, q1: usize, q2: usize) {
        self.cnot(q1, q2);
        self.cnot(q2, q1);
        self.cnot(q1, q2);
    }

    // -----------------------------------------------------------------------
    // Measurement
    // -----------------------------------------------------------------------

    /// Measure `qubit` in the computational (Z) basis.
    ///
    /// Follows the Aaronson-Gottesman algorithm:
    /// 1. Check if any stabilizer generator anticommutes with Z on the
    ///    measured qubit (i.e. has its X bit set for that qubit).
    /// 2. If yes (random outcome): collapse the state and record the result.
    /// 3. If no (deterministic outcome): compute the result from phases.
    ///
    /// # Errors
    /// Returns `InvalidQubitIndex` when `qubit >= num_qubits`.
    pub fn measure(&mut self, qubit: usize) -> Result<MeasurementOutcome> {
        if qubit >= self.num_qubits {
            return Err(QuantumError::InvalidQubitIndex {
                index: qubit as u32,
                num_qubits: self.num_qubits as u32,
            });
        }
        let n = self.num_qubits;
        // Search for a stabilizer (rows n..2n-1) that anticommutes with Z_qubit.
        // A generator anticommutes with Z_qubit iff its X bit for that qubit is 1.
        let p = (n..(2 * n)).find(|&i| self.x(i, qubit));
        if let Some(p) = p {
            // --- Random outcome ---
            // For every other row that anticommutes with Z_qubit, multiply it by row p
            // to make it commute.
            for i in 0..(2 * n) {
                if i != p && self.x(i, qubit) {
                    self.row_mult(i, p);
                }
            }
            // Move row p to the destabilizer: copy stabilizer p to destabilizer (p-n),
            // then set row p to be +/- Z_qubit.
            let dest_row = p - n;
            let stride = self.stride();
            // Copy row p to destabilizer row
            for j in 0..stride {
                self.tableau[dest_row * stride + j] = self.tableau[p * stride + j];
            }
            // Clear row p and set it to Z_qubit with random phase
            for j in 0..stride {
                self.tableau[p * stride + j] = false;
            }
            self.set_z(p, qubit, true);
            // The collapsed outcome is a fair coin flip; the phase bit of the
            // new stabilizer row records the observed value.
            let result: bool = self.rng.gen();
            self.set_r(p, result);
            let outcome = MeasurementOutcome {
                qubit: qubit as u32,
                result,
                probability: 0.5,
            };
            self.measurement_record.push(outcome.clone());
            Ok(outcome)
        } else {
            // --- Deterministic outcome ---
            // No stabilizer anticommutes with Z_qubit, so +/- Z_qubit lies in
            // the stabilizer group and the outcome is fixed by its sign.
            //
            // Per Aaronson-Gottesman (deterministic case): start a scratch
            // row at +I; for each i in 0..n whose destabilizer row i has
            // x[qubit]=1, multiply the scratch row by stabilizer row (n+i).
            // The accumulated phase bit of the scratch row is the
            // measurement result.
            let stride = self.stride();
            let mut scratch = vec![false; stride];
            for i in 0..n {
                // Check destabilizer row i: does it have x[qubit] set?
                if self.x(i, qubit) {
                    // Multiply scratch by stabilizer row (n+i), accumulating
                    // the phase exactly as in `row_mult`.
                    let stab_row = n + i;
                    let mut phase_sum: i32 = 0;
                    for j in 0..n {
                        let sx = scratch[j];
                        let sz = scratch[n + j];
                        let rx = self.x(stab_row, j);
                        let rz = self.z(stab_row, j);
                        phase_sum += g(rx, rz, sx, sz);
                    }
                    let scratch_r = scratch[2 * n];
                    let stab_r = self.r(stab_row);
                    let total = 2 * (scratch_r as i32) + 2 * (stab_r as i32) + phase_sum;
                    scratch[2 * n] = ((total % 4) + 4) % 4 == 2;
                    for j in 0..n {
                        scratch[j] ^= self.x(stab_row, j);
                    }
                    for j in 0..n {
                        scratch[n + j] ^= self.z(stab_row, j);
                    }
                }
            }
            let result = scratch[2 * n]; // phase bit = measurement outcome
            let outcome = MeasurementOutcome {
                qubit: qubit as u32,
                result,
                probability: 1.0,
            };
            self.measurement_record.push(outcome.clone());
            Ok(outcome)
        }
    }

    // -----------------------------------------------------------------------
    // Accessors
    // -----------------------------------------------------------------------

    /// Return the number of qubits in this stabilizer state.
    pub fn num_qubits(&self) -> usize {
        self.num_qubits
    }

    /// Return the measurement record accumulated so far.
    pub fn measurement_record(&self) -> &[MeasurementOutcome] {
        &self.measurement_record
    }

    /// Create a copy of this stabilizer state with a new RNG seed.
    ///
    /// The quantum state (tableau) is duplicated exactly; only the RNG
    /// and measurement record are reset. This is used by the Clifford+T
    /// backend to fork stabilizer terms during T-gate decomposition.
    pub fn clone_with_seed(&self, seed: u64) -> Result<Self> {
        Ok(Self {
            num_qubits: self.num_qubits,
            tableau: self.tableau.clone(),
            rng: StdRng::seed_from_u64(seed),
            measurement_record: Vec::new(),
        })
    }

    /// Check whether a gate is a Clifford gate (simulable by this backend).
    ///
    /// Clifford gates are: H, X, Y, Z, S, Sdg, CNOT, CZ, SWAP.
    /// Measure (non-unitary) and Barrier (no-op) are also accepted.
    /// T, Tdg, Rx, Ry, Rz, Phase, Rzz, Reset, and custom unitaries are
    /// NOT handled by this backend.
    pub fn is_clifford_gate(gate: &Gate) -> bool {
        matches!(
            gate,
            Gate::H(_)
                | Gate::X(_)
                | Gate::Y(_)
                | Gate::Z(_)
                | Gate::S(_)
                | Gate::Sdg(_)
                | Gate::CNOT(_, _)
                | Gate::CZ(_, _)
                | Gate::SWAP(_, _)
                | Gate::Measure(_)
                | Gate::Barrier
        )
    }

    // -----------------------------------------------------------------------
    // Gate dispatch
    // -----------------------------------------------------------------------

    /// Apply a gate from the `Gate` enum, returning measurement outcomes if any.
    ///
    /// Returns an error for non-Clifford gates.
    pub fn apply_gate(&mut self, gate: &Gate) -> Result<Vec<MeasurementOutcome>> {
        match gate {
            Gate::H(q) => {
                self.hadamard(*q as usize);
                Ok(vec![])
            }
            Gate::X(q) => {
                self.x_gate(*q as usize);
                Ok(vec![])
            }
            Gate::Y(q) => {
                self.y_gate(*q as usize);
                Ok(vec![])
            }
            Gate::Z(q) => {
                self.z_gate(*q as usize);
                Ok(vec![])
            }
            Gate::S(q) => {
                self.phase_gate(*q as usize);
                Ok(vec![])
            }
            Gate::Sdg(q) => {
                // S^dag = S^3: apply S three times
                let qu = *q as usize;
                self.phase_gate(qu);
                self.phase_gate(qu);
                self.phase_gate(qu);
                Ok(vec![])
            }
            Gate::CNOT(c, t) => {
                self.cnot(*c as usize, *t as usize);
                Ok(vec![])
            }
            Gate::CZ(q1, q2) => {
                self.cz(*q1 as usize, *q2 as usize);
                Ok(vec![])
            }
            Gate::SWAP(q1, q2) => {
                self.swap(*q1 as usize, *q2 as usize);
                Ok(vec![])
            }
            Gate::Measure(q) => {
                let outcome = self.measure(*q as usize)?;
                Ok(vec![outcome])
            }
            Gate::Barrier => Ok(vec![]),
            _ => Err(QuantumError::CircuitError(format!(
                "gate {:?} is not a Clifford gate and cannot be simulated \
                 by the stabilizer backend",
                gate
            ))),
        }
    }
}
// ---------------------------------------------------------------------------
// Phase accumulation helper
// ---------------------------------------------------------------------------
/// Phase contribution from multiplying two single-qubit Pauli operators,
/// each encoded as an (x, z) bit pair: (0,0)=I, (1,0)=X, (1,1)=Y, (0,1)=Z.
///
/// Returns the exponent of `i` (0, +1, or -1) picked up in the product
/// `P2 * P1`, where `P1 = (x1, z1)` and `P2 = (x2, z2)` -- verified
/// against the Pauli multiplication table (e.g. g(X, Z) = +1 because
/// Z*X = iY). This is the `g` function of the Aaronson-Gottesman
/// tableau algorithm.
#[inline]
fn g(x1: bool, z1: bool, x2: bool, z2: bool) -> i32 {
    let (a, b) = (x2 as i32, z2 as i32);
    match (x1, z1) {
        // I picks up no phase against anything.
        (false, false) => 0,
        // X: +i against Z, -i against Y, 0 otherwise.
        (true, false) => b * (1 - 2 * a),
        // Y: +i against X, -i against Z, 0 otherwise.
        (true, true) => a - b,
        // Z: +i against Y, -i against X, 0 otherwise.
        (false, true) => a * (2 * b - 1),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_initial_state_measurement() {
        // |0> state: measuring should give 0 deterministically
        let mut state = StabilizerState::new(1).unwrap();
        let outcome = state.measure(0).unwrap();
        assert!(!outcome.result, "measuring |0> should yield 0");
        assert_eq!(outcome.probability, 1.0);
    }

    #[test]
    fn test_x_gate_flips() {
        // X|0> = |1>: measuring should give 1 deterministically
        let mut state = StabilizerState::new(1).unwrap();
        state.x_gate(0);
        let outcome = state.measure(0).unwrap();
        assert!(outcome.result, "measuring X|0> should yield 1");
        assert_eq!(outcome.probability, 1.0);
    }

    #[test]
    fn test_hadamard_creates_superposition() {
        // H|0> = |+>: measurement should be random (prob 0.5)
        let mut state = StabilizerState::new_with_seed(1, 42).unwrap();
        state.hadamard(0);
        let outcome = state.measure(0).unwrap();
        assert_eq!(outcome.probability, 0.5);
    }

    #[test]
    fn test_bell_state() {
        // Create Bell state |00> + |11> (up to normalization)
        // Both qubits should always measure the same value.
        let mut state = StabilizerState::new_with_seed(2, 123).unwrap();
        state.hadamard(0);
        state.cnot(0, 1);
        let o0 = state.measure(0).unwrap();
        let o1 = state.measure(1).unwrap();
        assert_eq!(o0.result, o1.result, "Bell state qubits must be correlated");
    }

    #[test]
    fn test_z_gate_phase() {
        // Z|0> = |0> (no change)
        let mut state = StabilizerState::new(1).unwrap();
        state.z_gate(0);
        let outcome = state.measure(0).unwrap();
        assert!(!outcome.result, "Z|0> should still be |0>");
        // Z|1> = -|1> (global phase, same measurement)
        let mut state2 = StabilizerState::new(1).unwrap();
        state2.x_gate(0);
        state2.z_gate(0);
        let outcome2 = state2.measure(0).unwrap();
        assert!(outcome2.result, "Z|1> should still measure as |1>");
    }

    #[test]
    fn test_phase_gate() {
        // S^2 = Z: applying S twice should act as Z
        let mut s1 = StabilizerState::new_with_seed(1, 99).unwrap();
        s1.hadamard(0);
        s1.phase_gate(0);
        s1.phase_gate(0);
        // Now state is Z H|0> = Z|+> = |->
        let mut s2 = StabilizerState::new_with_seed(1, 99).unwrap();
        s2.hadamard(0);
        s2.z_gate(0);
        // Also |->
        // Measuring in X basis: H then measure
        s1.hadamard(0);
        s2.hadamard(0);
        let o1 = s1.measure(0).unwrap();
        let o2 = s2.measure(0).unwrap();
        assert_eq!(o1.result, o2.result, "S^2 should equal Z");
    }

    #[test]
    fn test_cz_gate() {
        // CZ on |+0> should give |0+> + |1-> = |00> + |01> + |10> - |11>
        // This is a product state in the X-Z basis.
        // After CZ, measuring qubit 0 in Z basis should still be random.
        let mut state = StabilizerState::new_with_seed(2, 777).unwrap();
        state.hadamard(0);
        state.cz(0, 1);
        let o = state.measure(0).unwrap();
        assert_eq!(o.probability, 0.5);
    }

    #[test]
    fn test_swap_gate() {
        // Prepare |10>, SWAP -> |01>
        let mut state = StabilizerState::new(2).unwrap();
        state.x_gate(0);
        state.swap(0, 1);
        let o0 = state.measure(0).unwrap();
        let o1 = state.measure(1).unwrap();
        assert!(!o0.result, "after SWAP, qubit 0 should be |0>");
        assert!(o1.result, "after SWAP, qubit 1 should be |1>");
    }

    #[test]
    fn test_is_clifford_gate() {
        assert!(StabilizerState::is_clifford_gate(&Gate::H(0)));
        assert!(StabilizerState::is_clifford_gate(&Gate::CNOT(0, 1)));
        assert!(StabilizerState::is_clifford_gate(&Gate::S(0)));
        assert!(!StabilizerState::is_clifford_gate(&Gate::T(0)));
        assert!(!StabilizerState::is_clifford_gate(&Gate::Rx(0, 0.5)));
    }

    #[test]
    fn test_apply_gate_dispatch() {
        let mut state = StabilizerState::new(2).unwrap();
        state.apply_gate(&Gate::H(0)).unwrap();
        state.apply_gate(&Gate::CNOT(0, 1)).unwrap();
        let outcomes = state.apply_gate(&Gate::Measure(0)).unwrap();
        assert_eq!(outcomes.len(), 1);
    }

    #[test]
    fn test_non_clifford_rejected() {
        let mut state = StabilizerState::new(1).unwrap();
        let result = state.apply_gate(&Gate::T(0));
        assert!(result.is_err());
    }

    #[test]
    fn test_measurement_record() {
        let mut state = StabilizerState::new(2).unwrap();
        state.x_gate(1);
        state.measure(0).unwrap();
        state.measure(1).unwrap();
        let record = state.measurement_record();
        assert_eq!(record.len(), 2);
        assert!(!record[0].result);
        assert!(record[1].result);
    }

    #[test]
    fn test_invalid_qubit_measure() {
        let mut state = StabilizerState::new(2).unwrap();
        let result = state.measure(5);
        assert!(result.is_err());
    }

    #[test]
    fn test_y_gate() {
        // Y|0> = i|1>, so measurement should give 1
        let mut state = StabilizerState::new(1).unwrap();
        state.y_gate(0);
        let outcome = state.measure(0).unwrap();
        assert!(outcome.result, "Y|0> should measure as |1>");
    }

    #[test]
    fn test_sdg_gate() {
        // Sdg = S^3, and S^4 = I, so S . Sdg = I
        let mut state = StabilizerState::new_with_seed(1, 42).unwrap();
        state.hadamard(0);
        state.phase_gate(0); // S
        state.apply_gate(&Gate::Sdg(0)).unwrap(); // Sdg
        // Should be back to H|0> = |+>
        state.hadamard(0);
        let outcome = state.measure(0).unwrap();
        assert!(!outcome.result, "S.Sdg should be identity");
        assert_eq!(outcome.probability, 1.0);
    }

    #[test]
    fn test_g_function() {
        // Convention: g(P1, P2) returns the exponent of i in the product
        // P2 * P1 (row_mult passes the source row's bits first).
        // I * anything = 0
        assert_eq!(g(false, false, true, true), 0);
        // g(X, Y): Y * X = -iZ => exponent -1
        assert_eq!(g(true, false, true, true), -1);
        // g(X, Z): Z * X = iY => exponent +1
        assert_eq!(g(true, false, false, true), 1);
        // g(Y, X): X * Y = iZ => exponent +1
        assert_eq!(g(true, true, true, false), 1);
    }

    #[test]
    fn test_ghz_state() {
        // GHZ state: H on q0, then CNOT chain
        let n = 5;
        let mut state = StabilizerState::new_with_seed(n, 314).unwrap();
        state.hadamard(0);
        for i in 0..(n - 1) {
            state.cnot(i, i + 1);
        }
        // All qubits should measure the same value
        let first = state.measure(0).unwrap();
        for i in 1..n {
            let oi = state.measure(i).unwrap();
            assert_eq!(
                first.result, oi.result,
                "GHZ state: qubit {} disagrees with qubit 0",
                i
            );
        }
    }
}

View File

@@ -0,0 +1,450 @@
//! Quantum state-vector simulator
//!
//! The core simulation engine: a dense vector of 2^n complex amplitudes with
//! gate application, measurement, collapse, expectation values, and fidelity.
use crate::error::{QuantumError, Result};
use crate::gate::Gate;
use crate::types::*;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
/// Maximum number of qubits supported on this platform.
///
/// A dense state stores `2^n` complex amplitudes (see
/// `QuantumState::estimate_memory`), so 32 qubits is the cap for this
/// state-vector backend.
pub const MAX_QUBITS: u32 = 32;
/// Quantum state represented as a state vector of 2^n complex amplitudes.
pub struct QuantumState {
    /// Dense amplitude vector of length `2^num_qubits`, indexed by basis state.
    amplitudes: Vec<Complex>,
    /// Number of qubits; fixed at construction.
    num_qubits: u32,
    /// RNG used for stochastic operations (e.g. the noise model via `rng_mut`).
    rng: StdRng,
    /// Every measurement outcome produced so far, in order.
    measurement_record: Vec<MeasurementOutcome>,
}
// ---------------------------------------------------------------------------
// Construction
// ---------------------------------------------------------------------------
impl QuantumState {
/// Create the |00...0> state for `num_qubits` qubits.
pub fn new(num_qubits: u32) -> Result<Self> {
if num_qubits == 0 {
return Err(QuantumError::CircuitError(
"cannot create quantum state with 0 qubits".into(),
));
}
if num_qubits > MAX_QUBITS {
return Err(QuantumError::QubitLimitExceeded {
requested: num_qubits,
maximum: MAX_QUBITS,
});
}
let n = 1usize << num_qubits;
let mut amplitudes = vec![Complex::ZERO; n];
amplitudes[0] = Complex::ONE;
Ok(Self {
amplitudes,
num_qubits,
rng: StdRng::from_entropy(),
measurement_record: Vec::new(),
})
}
/// Create the |00...0> state with a deterministic seed for reproducibility.
pub fn new_with_seed(num_qubits: u32, seed: u64) -> Result<Self> {
if num_qubits > MAX_QUBITS {
return Err(QuantumError::QubitLimitExceeded {
requested: num_qubits,
maximum: MAX_QUBITS,
});
}
let n = 1usize << num_qubits;
let mut amplitudes = vec![Complex::ZERO; n];
amplitudes[0] = Complex::ONE;
Ok(Self {
amplitudes,
num_qubits,
rng: StdRng::seed_from_u64(seed),
measurement_record: Vec::new(),
})
}
/// Construct a state from an explicit amplitude vector.
///
/// Validates that `amps.len() == 2^num_qubits`.
pub fn from_amplitudes(amps: Vec<Complex>, num_qubits: u32) -> Result<Self> {
if num_qubits > MAX_QUBITS {
return Err(QuantumError::QubitLimitExceeded {
requested: num_qubits,
maximum: MAX_QUBITS,
});
}
let expected = 1usize << num_qubits;
if amps.len() != expected {
return Err(QuantumError::InvalidStateVector {
length: amps.len(),
num_qubits,
});
}
Ok(Self {
amplitudes: amps,
num_qubits,
rng: StdRng::from_entropy(),
measurement_record: Vec::new(),
})
}
    // -------------------------------------------------------------------
    // Accessors
    // -------------------------------------------------------------------

    /// Number of qubits in this state.
    pub fn num_qubits(&self) -> u32 {
        self.num_qubits
    }

    /// Length of the amplitude vector (`2^num_qubits`).
    pub fn num_amplitudes(&self) -> usize {
        self.amplitudes.len()
    }

    /// Read-only view of the raw amplitude vector.
    pub fn state_vector(&self) -> &[Complex] {
        &self.amplitudes
    }

    /// Get mutable access to the raw amplitude array.
    ///
    /// # Safety
    /// Caller must maintain normalisation after mutation.
    pub fn amplitudes_mut(&mut self) -> &mut [Complex] {
        &mut self.amplitudes
    }

    /// |amplitude|^2 for each basis state.
    pub fn probabilities(&self) -> Vec<f64> {
        self.amplitudes.iter().map(|a| a.norm_sq()).collect()
    }

    /// Probability that `qubit` is in state |1>.
    ///
    /// Sums |amplitude|^2 over every basis state whose bit for `qubit` is set.
    pub fn probability_of_qubit(&self, qubit: QubitIndex) -> f64 {
        let qubit_bit = 1usize << qubit;
        let mut p1 = 0.0;
        for (i, amp) in self.amplitudes.iter().enumerate() {
            if i & qubit_bit != 0 {
                p1 += amp.norm_sq();
            }
        }
        p1
    }

    /// Measurement outcomes recorded so far, in order.
    pub fn measurement_record(&self) -> &[MeasurementOutcome] {
        &self.measurement_record
    }

    /// Estimated memory (in bytes) needed for a state of `num_qubits` qubits.
    pub fn estimate_memory(num_qubits: u32) -> usize {
        (1usize << num_qubits) * std::mem::size_of::<Complex>()
    }

    /// Provide mutable access to the internal RNG (used by noise model).
    pub(crate) fn rng_mut(&mut self) -> &mut StdRng {
        &mut self.rng
    }
    // -------------------------------------------------------------------
    // Gate dispatch
    // -------------------------------------------------------------------
    /// Apply a gate to the state, returning any measurement outcomes.
    ///
    /// Unitary gates return an empty vector; `Measure` returns the single
    /// outcome it produced. All qubit indices are validated up front, so an
    /// out-of-range index fails before the state is mutated.
    ///
    /// # Errors
    /// Invalid qubit index, coincident operands on a two-qubit gate, or a
    /// gate with no dense matrix form.
    pub fn apply_gate(&mut self, gate: &Gate) -> Result<Vec<MeasurementOutcome>> {
        // Validate qubit indices
        for &q in gate.qubits().iter() {
            self.validate_qubit(q)?;
        }
        match gate {
            // Barrier leaves the amplitudes untouched.
            Gate::Barrier => Ok(vec![]),
            Gate::Measure(q) => {
                let outcome = self.measure(*q)?;
                Ok(vec![outcome])
            }
            Gate::Reset(q) => {
                self.reset_qubit(*q)?;
                Ok(vec![])
            }
            // Two-qubit gates
            Gate::CNOT(q1, q2) | Gate::CZ(q1, q2) | Gate::SWAP(q1, q2) | Gate::Rzz(q1, q2, _) => {
                if q1 == q2 {
                    return Err(QuantumError::CircuitError(format!(
                        "two-qubit gate requires distinct qubits, got {} and {}",
                        q1, q2
                    )));
                }
                // unwrap: every variant in this arm provides a 4x4 matrix.
                let matrix = gate.matrix_2q().unwrap();
                self.apply_two_qubit_gate(*q1, *q2, &matrix);
                Ok(vec![])
            }
            // Everything else must be a single-qubit unitary
            other => {
                if let Some(matrix) = other.matrix_1q() {
                    let q = other.qubits()[0];
                    self.apply_single_qubit_gate(q, &matrix);
                    Ok(vec![])
                } else {
                    Err(QuantumError::CircuitError(format!(
                        "unsupported gate: {:?}",
                        other
                    )))
                }
            }
        }
    }
// -------------------------------------------------------------------
// Single-qubit gate kernel
// -------------------------------------------------------------------
/// Apply a 2x2 unitary matrix to the given qubit.
///
/// For each pair of amplitudes where the qubit bit is 0 (index `i`)
/// versus 1 (index `j = i + step`), we apply the matrix transformation.
pub fn apply_single_qubit_gate(&mut self, qubit: QubitIndex, matrix: &[[Complex; 2]; 2]) {
let step = 1usize << qubit;
let n = self.amplitudes.len();
let mut block_start = 0;
while block_start < n {
for i in block_start..block_start + step {
let j = i + step;
let a = self.amplitudes[i]; // qubit = 0
let b = self.amplitudes[j]; // qubit = 1
self.amplitudes[i] = matrix[0][0] * a + matrix[0][1] * b;
self.amplitudes[j] = matrix[1][0] * a + matrix[1][1] * b;
}
block_start += step << 1;
}
}
    // -------------------------------------------------------------------
    // Two-qubit gate kernel
    // -------------------------------------------------------------------
    /// Apply a 4x4 unitary matrix to qubits `q1` and `q2`.
    ///
    /// Matrix row/column index = q1_bit * 2 + q2_bit.
    pub fn apply_two_qubit_gate(
        &mut self,
        q1: QubitIndex,
        q2: QubitIndex,
        matrix: &[[Complex; 4]; 4],
    ) {
        let q1_bit = 1usize << q1;
        let q2_bit = 1usize << q2;
        let n = self.amplitudes.len();
        for base in 0..n {
            // Process each group of 4 amplitudes exactly once: when both
            // target bits in the index are zero.
            if base & q1_bit != 0 || base & q2_bit != 0 {
                continue;
            }
            // The 4 basis indices of this group, ordered to match the
            // row/column convention above (q1 is the high bit).
            let idxs = [
                base, // q1=0, q2=0
                base | q2_bit, // q1=0, q2=1
                base | q1_bit, // q1=1, q2=0
                base | q1_bit | q2_bit, // q1=1, q2=1
            ];
            // Snapshot the group before overwriting any member.
            let vals = [
                self.amplitudes[idxs[0]],
                self.amplitudes[idxs[1]],
                self.amplitudes[idxs[2]],
                self.amplitudes[idxs[3]],
            ];
            // 4x4 matrix-vector product, written back in place.
            for r in 0..4 {
                self.amplitudes[idxs[r]] = matrix[r][0] * vals[0]
                    + matrix[r][1] * vals[1]
                    + matrix[r][2] * vals[2]
                    + matrix[r][3] * vals[3];
            }
        }
    }
    // -------------------------------------------------------------------
    // Measurement
    // -------------------------------------------------------------------
    /// Measure a single qubit projectively.
    ///
    /// 1. Compute P(qubit = 0).
    /// 2. Sample the outcome from the distribution.
    /// 3. Collapse the state vector (zero out the other branch).
    /// 4. Renormalise.
    ///
    /// The sampled outcome is also appended to the measurement record.
    ///
    /// # Errors
    /// Returns an error if `qubit` is out of range.
    pub fn measure(&mut self, qubit: QubitIndex) -> Result<MeasurementOutcome> {
        self.validate_qubit(qubit)?;
        let qubit_bit = 1usize << qubit;
        let n = self.amplitudes.len();
        // Probability of measuring |0>
        let mut p0: f64 = 0.0;
        for i in 0..n {
            if i & qubit_bit == 0 {
                p0 += self.amplitudes[i].norm_sq();
            }
        }
        // gen() samples uniformly from [0, 1), so p0 == 1.0 always yields
        // `result == false` (measured |0>).
        let random: f64 = self.rng.gen();
        let result = random >= p0; // true => measured |1>
        let prob = if result { 1.0 - p0 } else { p0 };
        // Guard against division by zero (degenerate state).
        let norm_factor = if prob > 0.0 { 1.0 / prob.sqrt() } else { 0.0 };
        // Collapse + renormalise
        for i in 0..n {
            let bit_is_one = i & qubit_bit != 0;
            if bit_is_one == result {
                // Surviving branch: rescale to restore unit norm.
                self.amplitudes[i] = self.amplitudes[i] * norm_factor;
            } else {
                // Other branch is projected out.
                self.amplitudes[i] = Complex::ZERO;
            }
        }
        let outcome = MeasurementOutcome {
            qubit,
            result,
            probability: prob,
        };
        self.measurement_record.push(outcome.clone());
        Ok(outcome)
    }
/// Measure all qubits sequentially (qubit 0 first).
pub fn measure_all(&mut self) -> Result<Vec<MeasurementOutcome>> {
let mut outcomes = Vec::with_capacity(self.num_qubits as usize);
for q in 0..self.num_qubits {
outcomes.push(self.measure(q)?);
}
Ok(outcomes)
}
// -------------------------------------------------------------------
// Reset
// -------------------------------------------------------------------
/// Reset a qubit to |0>.
///
/// Implemented as "measure, then flip if result was |1>".
pub fn reset_qubit(&mut self, qubit: QubitIndex) -> Result<()> {
let outcome = self.measure(qubit)?;
if outcome.result {
// Qubit collapsed to |1>; apply X to bring it back to |0>.
let x_matrix = Gate::X(qubit).matrix_1q().unwrap();
self.apply_single_qubit_gate(qubit, &x_matrix);
}
Ok(())
}
    // -------------------------------------------------------------------
    // Expectation values
    // -------------------------------------------------------------------
    /// Compute <psi| P |psi> for a Pauli string P.
    ///
    /// For each basis state |i>, we compute P|i> = phase * |j>, then
    /// accumulate conj(amp[j]) * phase * amp[i].
    ///
    /// Returns the real part of the accumulated sum; for a Hermitian
    /// observable the imaginary part is numerical noise.
    pub fn expectation_value(&self, pauli: &PauliString) -> f64 {
        let n = self.amplitudes.len();
        let mut result = Complex::ZERO;
        for i in 0..n {
            // Track where P maps |i> (bit flips) and the phase picked up.
            let mut j = i;
            let mut phase = Complex::ONE;
            for &(qubit, op) in &pauli.ops {
                let bit = (i >> qubit) & 1;
                match op {
                    PauliOp::I => {}
                    PauliOp::X => {
                        // X flips the qubit with no phase.
                        j ^= 1usize << qubit;
                    }
                    PauliOp::Y => {
                        j ^= 1usize << qubit;
                        // Y|0> = i|1>, Y|1> = -i|0>
                        if bit == 0 {
                            phase = phase * Complex::I;
                        } else {
                            phase = phase * Complex::new(0.0, -1.0);
                        }
                    }
                    PauliOp::Z => {
                        // Z is diagonal: sign flip on the |1> component.
                        if bit == 1 {
                            phase = -phase;
                        }
                    }
                }
            }
            // <j| (phase |i>) = conj(amp[j]) * phase * amp[i]
            result += self.amplitudes[j].conj() * phase * self.amplitudes[i];
        }
        // For a Hermitian observable the result is real (up to numerical noise).
        result.re
    }
/// Compute <psi| H |psi> for a Hamiltonian H = sum_k c_k P_k.
pub fn expectation_hamiltonian(&self, h: &Hamiltonian) -> f64 {
h.terms
.iter()
.map(|(coeff, ps)| coeff * self.expectation_value(ps))
.sum()
}
// -------------------------------------------------------------------
// Normalisation & fidelity
// -------------------------------------------------------------------
/// Renormalise the state vector so that sum |a_i|^2 = 1.
pub fn normalize(&mut self) {
let norm_sq: f64 = self.amplitudes.iter().map(|a| a.norm_sq()).sum();
if norm_sq > 0.0 {
let inv_norm = 1.0 / norm_sq.sqrt();
for a in self.amplitudes.iter_mut() {
*a = *a * inv_norm;
}
}
}
/// State fidelity: |<self|other>|^2.
pub fn fidelity(&self, other: &QuantumState) -> f64 {
if self.num_qubits != other.num_qubits {
return 0.0;
}
let mut inner = Complex::ZERO;
for (a, b) in self.amplitudes.iter().zip(other.amplitudes.iter()) {
inner += a.conj() * *b;
}
inner.norm_sq()
}
// -------------------------------------------------------------------
// Internal helpers
// -------------------------------------------------------------------
fn validate_qubit(&self, qubit: QubitIndex) -> Result<()> {
if qubit >= self.num_qubits {
return Err(QuantumError::InvalidQubitIndex {
index: qubit,
num_qubits: self.num_qubits,
});
}
Ok(())
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,851 @@
//! Matrix Product State (MPS) tensor network simulator.
//!
//! Represents an n-qubit quantum state as a chain of tensors:
//! |psi> = Sum A[1]^{i1} . A[2]^{i2} . ... . A[n]^{in} |i1 i2 ... in>
//!
//! Each A[k] has shape (chi_{k-1}, 2, chi_k) where chi is the bond dimension.
//! Product states have chi=1. Entanglement increases bond dimension up to a
//! configurable maximum, beyond which truncation provides approximate simulation
//! with controlled error.
use crate::error::{QuantumError, Result};
use crate::gate::Gate;
use crate::types::{Complex, MeasurementOutcome, QubitIndex};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
/// Configuration for the MPS simulator.
#[derive(Debug, Clone)]
pub struct MpsConfig {
    /// Maximum bond dimension. Higher values yield more accurate simulation
    /// at the cost of increased memory and computation time.
    /// Typical values: 64, 128, 256, 512, 1024.
    pub max_bond_dim: usize,
    /// Truncation threshold: singular values below this are discarded.
    /// Discarded weight is accumulated into the state's truncation error.
    pub truncation_threshold: f64,
}
impl Default for MpsConfig {
    /// Defaults: bond dimension 256 with a 1e-10 truncation cutoff.
    fn default() -> Self {
        Self {
            max_bond_dim: 256,
            truncation_threshold: 1e-10,
        }
    }
}
// ---------------------------------------------------------------------------
// MPS Tensor
// ---------------------------------------------------------------------------
/// A single MPS tensor for qubit k.
///
/// Shape: (left_dim, 2, right_dim) stored as a flat `Vec<Complex>` in
/// row-major order with index = left * (2 * right_dim) + phys * right_dim + right.
#[derive(Clone)]
struct MpsTensor {
    // Flattened (left, phys, right) data; length = left_dim * 2 * right_dim.
    data: Vec<Complex>,
    // Bond dimension shared with the neighbouring tensor on the left.
    left_dim: usize,
    // Bond dimension shared with the neighbouring tensor on the right.
    right_dim: usize,
}
impl MpsTensor {
    /// Allocate a zero-filled tensor of shape (left_dim, 2, right_dim).
    fn new_zero(left_dim: usize, right_dim: usize) -> Self {
        let len = left_dim * 2 * right_dim;
        Self {
            data: vec![Complex::ZERO; len],
            left_dim,
            right_dim,
        }
    }
    /// Row-major flat index of element (left, phys, right).
    #[inline]
    fn index(&self, left: usize, phys: usize, right: usize) -> usize {
        // Equivalent to left * (2 * right_dim) + phys * right_dim + right.
        (left * 2 + phys) * self.right_dim + right
    }
    /// Read the element at (left, phys, right).
    #[inline]
    fn get(&self, left: usize, phys: usize, right: usize) -> Complex {
        self.data[self.index(left, phys, right)]
    }
    /// Write the element at (left, phys, right).
    #[inline]
    fn set(&mut self, left: usize, phys: usize, right: usize, val: Complex) {
        let pos = self.index(left, phys, right);
        self.data[pos] = val;
    }
}
// ---------------------------------------------------------------------------
// MPS State
// ---------------------------------------------------------------------------
/// Matrix Product State quantum simulator.
///
/// Represents quantum states as a chain of tensors, enabling efficient
/// simulation of circuits with bounded entanglement. Can handle hundreds
/// to thousands of qubits when bond dimension stays manageable.
pub struct MpsState {
    // Number of sites (qubits) in the chain.
    num_qubits: usize,
    // One rank-3 tensor per qubit, in chain order.
    tensors: Vec<MpsTensor>,
    // Bond-dimension limit and truncation threshold.
    config: MpsConfig,
    // RNG used to sample measurement outcomes.
    rng: StdRng,
    // Every measurement outcome produced so far, in order.
    measurement_record: Vec<MeasurementOutcome>,
    /// Accumulated truncation error for confidence bounds.
    total_truncation_error: f64,
}
// ---------------------------------------------------------------------------
// Construction
// ---------------------------------------------------------------------------
impl MpsState {
/// Initialize the |00...0> product state.
///
/// Each tensor has bond dimension 1 and physical dimension 2, with the
/// amplitude concentrated on the |0> basis state.
pub fn new(num_qubits: usize) -> Result<Self> {
Self::new_with_config(num_qubits, MpsConfig::default())
}
/// Initialize |00...0> with explicit configuration.
pub fn new_with_config(num_qubits: usize, config: MpsConfig) -> Result<Self> {
if num_qubits == 0 {
return Err(QuantumError::CircuitError(
"cannot create MPS with 0 qubits".into(),
));
}
let mut tensors = Vec::with_capacity(num_qubits);
for _ in 0..num_qubits {
let mut t = MpsTensor::new_zero(1, 1);
// |0> component = 1, |1> component = 0
t.set(0, 0, 0, Complex::ONE);
tensors.push(t);
}
Ok(Self {
num_qubits,
tensors,
config,
rng: StdRng::from_entropy(),
measurement_record: Vec::new(),
total_truncation_error: 0.0,
})
}
/// Initialize |00...0> with a deterministic seed for reproducibility.
pub fn new_with_seed(num_qubits: usize, seed: u64, config: MpsConfig) -> Result<Self> {
let mut state = Self::new_with_config(num_qubits, config)?;
state.rng = StdRng::seed_from_u64(seed);
Ok(state)
}
    // -------------------------------------------------------------------
    // Accessors
    // -------------------------------------------------------------------
    /// Number of qubits (sites) in the chain.
    pub fn num_qubits(&self) -> usize {
        self.num_qubits
    }
    /// Current maximum bond dimension across all bonds in the MPS chain.
    pub fn max_bond_dimension(&self) -> usize {
        self.tensors
            .iter()
            .map(|t| t.left_dim.max(t.right_dim))
            .max()
            .unwrap_or(1)
    }
    /// Accumulated truncation error from bond-dimension truncations.
    pub fn truncation_error(&self) -> f64 {
        self.total_truncation_error
    }
    /// All measurement outcomes recorded so far, in order.
    pub fn measurement_record(&self) -> &[MeasurementOutcome] {
        &self.measurement_record
    }
// -------------------------------------------------------------------
// Single-qubit gate
// -------------------------------------------------------------------
/// Apply a 2x2 unitary to a single qubit.
///
/// Contracts the gate matrix with the physical index of tensor[qubit]:
/// new_tensor(l, i', r) = Sum_i matrix[i'][i] * tensor(l, i, r)
///
/// This does not change bond dimensions.
pub fn apply_single_qubit_gate(&mut self, qubit: usize, matrix: &[[Complex; 2]; 2]) {
let t = &self.tensors[qubit];
let left_dim = t.left_dim;
let right_dim = t.right_dim;
let mut new_t = MpsTensor::new_zero(left_dim, right_dim);
for l in 0..left_dim {
for r in 0..right_dim {
let v0 = t.get(l, 0, r);
let v1 = t.get(l, 1, r);
new_t.set(l, 0, r, matrix[0][0] * v0 + matrix[0][1] * v1);
new_t.set(l, 1, r, matrix[1][0] * v0 + matrix[1][1] * v1);
}
}
self.tensors[qubit] = new_t;
}
    // -------------------------------------------------------------------
    // Two-qubit gate (adjacent)
    // -------------------------------------------------------------------
    /// Apply a 4x4 unitary gate to two adjacent qubits.
    ///
    /// The algorithm:
    /// 1. Contract tensors at q1 and q2 into a combined 4-index tensor.
    /// 2. Apply the 4x4 gate matrix on the two physical indices.
    /// 3. Reshape into a matrix and perform truncated QR decomposition.
    /// 4. Split back into two MPS tensors, respecting max_bond_dim.
    ///
    /// # Errors
    /// Out-of-range indices, or qubits that are not chain neighbours.
    pub fn apply_two_qubit_gate_adjacent(
        &mut self,
        q1: usize,
        q2: usize,
        matrix: &[[Complex; 4]; 4],
    ) -> Result<()> {
        if q1 >= self.num_qubits || q2 >= self.num_qubits {
            return Err(QuantumError::CircuitError(
                "qubit index out of range for MPS".into(),
            ));
        }
        // Ensure q1 < q2 for adjacent gate application.
        let (qa, qb) = if q1 < q2 { (q1, q2) } else { (q2, q1) };
        if qb - qa != 1 {
            return Err(QuantumError::CircuitError(
                "apply_two_qubit_gate_adjacent requires adjacent qubits".into(),
            ));
        }
        let t_a = &self.tensors[qa];
        let t_b = &self.tensors[qb];
        let left_dim = t_a.left_dim;
        let inner_dim = t_a.right_dim; // == t_b.left_dim
        let right_dim = t_b.right_dim;
        // Step 1: Contract over the shared bond index to form a 4-index tensor
        // theta(l, ia, ib, r) = Sum_m A_a(l, ia, m) * A_b(m, ib, r)
        let mut theta = vec![Complex::ZERO; left_dim * 2 * 2 * right_dim];
        // Row-major flat index into theta with axes (l, ia, ib, r).
        let theta_idx = |l: usize, ia: usize, ib: usize, r: usize| -> usize {
            l * (4 * right_dim) + ia * (2 * right_dim) + ib * right_dim + r
        };
        for l in 0..left_dim {
            for ia in 0..2 {
                for ib in 0..2 {
                    for r in 0..right_dim {
                        let mut sum = Complex::ZERO;
                        for m in 0..inner_dim {
                            sum += t_a.get(l, ia, m) * t_b.get(m, ib, r);
                        }
                        theta[theta_idx(l, ia, ib, r)] = sum;
                    }
                }
            }
        }
        // Step 2: Apply the gate matrix on the physical indices.
        // Gate index convention: row = ia' * 2 + ib', col = ia * 2 + ib
        // If q1 > q2, the gate was specified with reversed qubit order;
        // we must transpose the physical indices accordingly.
        let swap_phys = q1 > q2;
        let mut gated = vec![Complex::ZERO; left_dim * 2 * 2 * right_dim];
        for l in 0..left_dim {
            for r in 0..right_dim {
                // Collect the 4 input values
                let mut inp = [Complex::ZERO; 4];
                for ia in 0..2 {
                    for ib in 0..2 {
                        // Map chain order (ia, ib) to the caller's (q1, q2) order.
                        let idx = if swap_phys { ib * 2 + ia } else { ia * 2 + ib };
                        inp[idx] = theta[theta_idx(l, ia, ib, r)];
                    }
                }
                // Apply gate
                for ia_out in 0..2 {
                    for ib_out in 0..2 {
                        let row = if swap_phys {
                            ib_out * 2 + ia_out
                        } else {
                            ia_out * 2 + ib_out
                        };
                        let mut val = Complex::ZERO;
                        for c in 0..4 {
                            val += matrix[row][c] * inp[c];
                        }
                        gated[theta_idx(l, ia_out, ib_out, r)] = val;
                    }
                }
            }
        }
        // Step 3: Reshape into matrix of shape (left_dim * 2) x (2 * right_dim)
        // and perform truncated decomposition.
        let rows = left_dim * 2;
        let cols = 2 * right_dim;
        let mut mat = vec![Complex::ZERO; rows * cols];
        for l in 0..left_dim {
            for ia in 0..2 {
                for ib in 0..2 {
                    for r in 0..right_dim {
                        // Row groups (l, ia); column groups (ib, r).
                        let row = l * 2 + ia;
                        let col = ib * right_dim + r;
                        mat[row * cols + col] = gated[theta_idx(l, ia, ib, r)];
                    }
                }
            }
        }
        let (q_mat, r_mat, new_bond, trunc_err) = Self::truncated_qr(
            &mat,
            rows,
            cols,
            self.config.max_bond_dim,
            self.config.truncation_threshold,
        );
        // Track discarded weight for downstream error bounds.
        self.total_truncation_error += trunc_err;
        // Step 4: Reshape Q into tensor_a (left_dim, 2, new_bond)
        // and R into tensor_b (new_bond, 2, right_dim).
        let mut new_a = MpsTensor::new_zero(left_dim, new_bond);
        for l in 0..left_dim {
            for ia in 0..2 {
                for nb in 0..new_bond {
                    let row = l * 2 + ia;
                    new_a.set(l, ia, nb, q_mat[row * new_bond + nb]);
                }
            }
        }
        let mut new_b = MpsTensor::new_zero(new_bond, right_dim);
        for nb in 0..new_bond {
            for ib in 0..2 {
                for r in 0..right_dim {
                    // R rows are indexed by the new bond; columns by (ib, r).
                    let col = ib * right_dim + r;
                    new_b.set(nb, ib, r, r_mat[nb * cols + col]);
                }
            }
        }
        self.tensors[qa] = new_a;
        self.tensors[qb] = new_b;
        Ok(())
    }
    // -------------------------------------------------------------------
    // Two-qubit gate (general, possibly non-adjacent)
    // -------------------------------------------------------------------
    /// Apply a 4x4 gate to any pair of qubits.
    ///
    /// If the qubits are adjacent, delegates directly. Otherwise, uses SWAP
    /// gates to move the qubits next to each other, applies the gate, then
    /// swaps back to restore qubit ordering.
    ///
    /// # Errors
    /// Coincident qubits, or any error from the adjacent-gate kernel.
    pub fn apply_two_qubit_gate(
        &mut self,
        q1: usize,
        q2: usize,
        matrix: &[[Complex; 4]; 4],
    ) -> Result<()> {
        if q1 == q2 {
            return Err(QuantumError::CircuitError(
                "two-qubit gate requires distinct qubits".into(),
            ));
        }
        let diff = if q1 > q2 { q1 - q2 } else { q2 - q1 };
        if diff == 1 {
            return self.apply_two_qubit_gate_adjacent(q1, q2, matrix);
        }
        let swap_matrix = Self::swap_matrix();
        // Move q1 adjacent to q2 via SWAP chain.
        // We swap q1 toward q2, keeping track of its current position.
        let (mut pos1, target_pos) = if q1 < q2 { (q1, q2 - 1) } else { (q1, q2 + 1) };
        // Forward swaps: move pos1 toward target_pos
        // Each entry `s` is the left index of an adjacent (s, s+1) SWAP.
        let forward_steps: Vec<usize> = if pos1 < target_pos {
            (pos1..target_pos).collect()
        } else {
            (target_pos..pos1).rev().collect()
        };
        for &s in &forward_steps {
            self.apply_two_qubit_gate_adjacent(s, s + 1, &swap_matrix)?;
        }
        pos1 = target_pos;
        // Now pos1 and q2 are adjacent: apply the gate.
        // Argument order (pos1, q2) keeps the caller's q1-first convention,
        // letting the adjacent kernel transpose physical indices if needed.
        self.apply_two_qubit_gate_adjacent(pos1, q2, matrix)?;
        // Reverse swaps to restore original qubit ordering.
        for &s in forward_steps.iter().rev() {
            self.apply_two_qubit_gate_adjacent(s, s + 1, &swap_matrix)?;
        }
        Ok(())
    }
    // -------------------------------------------------------------------
    // Measurement
    // -------------------------------------------------------------------
    /// Measure a single qubit projectively.
    ///
    /// 1. Compute the probability of |0> by locally contracting the MPS.
    /// 2. Sample the outcome.
    /// 3. Collapse the tensor at the measured qubit by projecting.
    /// 4. Renormalize.
    ///
    /// # Errors
    /// Returns an error if `qubit` is out of range.
    pub fn measure(&mut self, qubit: usize) -> Result<MeasurementOutcome> {
        if qubit >= self.num_qubits {
            return Err(QuantumError::InvalidQubitIndex {
                index: qubit as QubitIndex,
                num_qubits: self.num_qubits as u32,
            });
        }
        // Compute reduced density matrix element rho_00 and rho_11
        // for the target qubit by contracting the MPS from both ends.
        let (p0, p1) = self.qubit_probabilities(qubit);
        // Normalise against the total weight so drift in the global norm
        // does not bias the sample; a fully degenerate state falls back
        // to a 50/50 coin.
        let total = p0 + p1;
        let p0_norm = if total > 0.0 { p0 / total } else { 0.5 };
        let random: f64 = self.rng.gen();
        let result = random >= p0_norm; // true => measured |1>
        let prob = if result { 1.0 - p0_norm } else { p0_norm };
        // Collapse: project the tensor at this qubit onto the measured state.
        let t = &self.tensors[qubit];
        let left_dim = t.left_dim;
        let right_dim = t.right_dim;
        let measured_phys: usize = if result { 1 } else { 0 };
        // Only the measured physical component survives; the other stays zero.
        let mut new_t = MpsTensor::new_zero(left_dim, right_dim);
        for l in 0..left_dim {
            for r in 0..right_dim {
                new_t.set(l, measured_phys, r, t.get(l, measured_phys, r));
            }
        }
        // Renormalize the projected tensor.
        // NOTE(review): unit Frobenius norm of this one tensor restores the
        // global state norm exactly only when the surrounding tensors form
        // an isometric (canonical) environment — confirm this invariant.
        let mut norm_sq = 0.0;
        for val in &new_t.data {
            norm_sq += val.norm_sq();
        }
        if norm_sq > 0.0 {
            let inv_norm = 1.0 / norm_sq.sqrt();
            for val in new_t.data.iter_mut() {
                *val = *val * inv_norm;
            }
        }
        self.tensors[qubit] = new_t;
        let outcome = MeasurementOutcome {
            qubit: qubit as QubitIndex,
            result,
            probability: prob,
        };
        self.measurement_record.push(outcome.clone());
        Ok(outcome)
    }
    // -------------------------------------------------------------------
    // Gate dispatch
    // -------------------------------------------------------------------
    /// Apply a gate from the Gate enum, returning any measurement outcomes.
    ///
    /// Mirrors the state-vector backend's dispatch: unitary gates return an
    /// empty vector, `Measure` returns its single outcome. All qubit indices
    /// are validated before any tensor is touched.
    pub fn apply_gate(&mut self, gate: &Gate) -> Result<Vec<MeasurementOutcome>> {
        for &q in gate.qubits().iter() {
            if (q as usize) >= self.num_qubits {
                return Err(QuantumError::InvalidQubitIndex {
                    index: q,
                    num_qubits: self.num_qubits as u32,
                });
            }
        }
        match gate {
            // Barrier leaves the tensors untouched.
            Gate::Barrier => Ok(vec![]),
            Gate::Measure(q) => {
                let outcome = self.measure(*q as usize)?;
                Ok(vec![outcome])
            }
            // Reset = measure, then flip back to |0> if |1> was observed.
            Gate::Reset(q) => {
                let outcome = self.measure(*q as usize)?;
                if outcome.result {
                    let x = Gate::X(*q).matrix_1q().unwrap();
                    self.apply_single_qubit_gate(*q as usize, &x);
                }
                Ok(vec![])
            }
            Gate::CNOT(q1, q2) | Gate::CZ(q1, q2) | Gate::SWAP(q1, q2) | Gate::Rzz(q1, q2, _) => {
                if q1 == q2 {
                    return Err(QuantumError::CircuitError(format!(
                        "two-qubit gate requires distinct qubits, got {} and {}",
                        q1, q2
                    )));
                }
                // unwrap: every variant in this arm provides a 4x4 matrix.
                let matrix = gate.matrix_2q().unwrap();
                self.apply_two_qubit_gate(*q1 as usize, *q2 as usize, &matrix)?;
                Ok(vec![])
            }
            // Everything else must be a single-qubit unitary.
            other => {
                if let Some(matrix) = other.matrix_1q() {
                    let q = other.qubits()[0];
                    self.apply_single_qubit_gate(q as usize, &matrix);
                    Ok(vec![])
                } else {
                    Err(QuantumError::CircuitError(format!(
                        "unsupported gate for MPS: {:?}",
                        other
                    )))
                }
            }
        }
    }
// -------------------------------------------------------------------
// Internal: SWAP matrix
// -------------------------------------------------------------------
fn swap_matrix() -> [[Complex; 4]; 4] {
let c0 = Complex::ZERO;
let c1 = Complex::ONE;
[
[c1, c0, c0, c0],
[c0, c0, c1, c0],
[c0, c1, c0, c0],
[c0, c0, c0, c1],
]
}
    // -------------------------------------------------------------------
    // Internal: qubit probability computation
    // -------------------------------------------------------------------
    /// Compute (prob_0, prob_1) for a single qubit by contracting the MPS.
    ///
    /// This builds a partial "environment" from the left and right boundaries,
    /// then contracts through the target qubit tensor for each physical index.
    ///
    /// NOTE(review): each transfer-matrix update conjugates the environment
    /// entry (`e.conj()`), i.e. it effectively uses the transposed
    /// environment. For a Hermitian environment this changes only the
    /// imaginary part, and only the real part is returned — but confirm
    /// this is intentional rather than using `e` directly.
    fn qubit_probabilities(&self, qubit: usize) -> (f64, f64) {
        // Left environment: contract tensors 0..qubit into a matrix.
        // env_left has shape (bond_dim, bond_dim) representing
        // Sum_{physical indices} conj(A) * A contracted from the left.
        let bond_left = self.tensors[qubit].left_dim;
        let mut env_left = vec![Complex::ZERO; bond_left * bond_left];
        // Initialize to identity (boundary condition: left boundary = 1).
        for i in 0..bond_left {
            env_left[i * bond_left + i] = Complex::ONE;
        }
        // Contract from site 0 to qubit-1.
        for site in 0..qubit {
            let t = &self.tensors[site];
            let dim_in = t.left_dim;
            let dim_out = t.right_dim;
            let mut new_env = vec![Complex::ZERO; dim_out * dim_out];
            for ro in 0..dim_out {
                for co in 0..dim_out {
                    let mut sum = Complex::ZERO;
                    for ri in 0..dim_in {
                        for ci in 0..dim_in {
                            let e = env_left[ri * dim_in + ci];
                            // Skip exact zeros: sparse environments are common.
                            if e.norm_sq() == 0.0 {
                                continue;
                            }
                            for p in 0..2 {
                                sum += e.conj() // env^*
                                    * t.get(ri, p, ro).conj()
                                    * t.get(ci, p, co);
                            }
                        }
                    }
                    new_env[ro * dim_out + co] = sum;
                }
            }
            env_left = new_env;
        }
        // Right environment: contract tensors (qubit+1)..num_qubits.
        let bond_right = self.tensors[qubit].right_dim;
        let mut env_right = vec![Complex::ZERO; bond_right * bond_right];
        for i in 0..bond_right {
            env_right[i * bond_right + i] = Complex::ONE;
        }
        // Walk inward from the right edge toward the target site.
        for site in (qubit + 1..self.num_qubits).rev() {
            let t = &self.tensors[site];
            let dim_in = t.right_dim;
            let dim_out = t.left_dim;
            let mut new_env = vec![Complex::ZERO; dim_out * dim_out];
            for ro in 0..dim_out {
                for co in 0..dim_out {
                    let mut sum = Complex::ZERO;
                    for ri in 0..dim_in {
                        for ci in 0..dim_in {
                            let e = env_right[ri * dim_in + ci];
                            if e.norm_sq() == 0.0 {
                                continue;
                            }
                            for p in 0..2 {
                                sum += e.conj() * t.get(ro, p, ri).conj() * t.get(co, p, ci);
                            }
                        }
                    }
                    new_env[ro * dim_out + co] = sum;
                }
            }
            env_right = new_env;
        }
        // Contract with the target qubit tensor for each physical index.
        let t = &self.tensors[qubit];
        let mut probs = [0.0f64; 2];
        for phys in 0..2 {
            let mut val = Complex::ZERO;
            for l1 in 0..t.left_dim {
                for l2 in 0..t.left_dim {
                    let e_l = env_left[l1 * t.left_dim + l2];
                    if e_l.norm_sq() == 0.0 {
                        continue;
                    }
                    for r1 in 0..t.right_dim {
                        for r2 in 0..t.right_dim {
                            let e_r = env_right[r1 * t.right_dim + r2];
                            if e_r.norm_sq() == 0.0 {
                                continue;
                            }
                            val +=
                                e_l.conj() * t.get(l1, phys, r1).conj() * t.get(l2, phys, r2) * e_r;
                        }
                    }
                }
            }
            probs[phys] = val.re; // Should be real for a valid density matrix
        }
        // Clamp tiny negative values produced by floating-point noise.
        (probs[0].max(0.0), probs[1].max(0.0))
    }
    // -------------------------------------------------------------------
    // Internal: Truncated QR decomposition
    // -------------------------------------------------------------------
    /// Perform modified Gram-Schmidt QR on a complex matrix, then truncate.
    ///
    /// Given matrix M of shape (rows x cols), computes M = Q * R where Q has
    /// orthonormal columns and R is upper triangular. Truncates to at most
    /// `max_rank` columns of Q (and rows of R), discarding columns whose
    /// R diagonal magnitude falls below `threshold`.
    ///
    /// Returns (Q_flat, R_flat, rank, truncation_error).
    ///
    /// NOTE(review): the reported error mixes two scales — residual-column
    /// norms are summed directly while post-rank-limit columns contribute
    /// the sqrt of their total squared norm (computed from the ORIGINAL
    /// matrix columns, not residuals) — treat it as an estimate, not a bound.
    fn truncated_qr(
        mat: &[Complex],
        rows: usize,
        cols: usize,
        max_rank: usize,
        threshold: f64,
    ) -> (Vec<Complex>, Vec<Complex>, usize, f64) {
        let rank_bound = rows.min(cols).min(max_rank);
        // Modified Gram-Schmidt: build Q column by column, R simultaneously.
        let mut q_cols: Vec<Vec<Complex>> = Vec::with_capacity(rank_bound);
        let mut r_data = vec![Complex::ZERO; rank_bound * cols];
        let mut actual_rank = 0;
        let mut trunc_error = 0.0;
        // NOTE: `cols.min(rank_bound + cols)` is always `cols`; kept as-is.
        for j in 0..cols.min(rank_bound + cols) {
            if actual_rank >= rank_bound {
                // Estimate truncation error from remaining columns.
                if j < cols {
                    for jj in j..cols {
                        let mut col_norm_sq = 0.0;
                        for i in 0..rows {
                            col_norm_sq += mat[i * cols + jj].norm_sq();
                        }
                        trunc_error += col_norm_sq;
                    }
                    trunc_error = trunc_error.sqrt();
                }
                break;
            }
            if j >= cols {
                break;
            }
            // Extract column j of the input matrix.
            let mut v: Vec<Complex> = (0..rows).map(|i| mat[i * cols + j]).collect();
            // Orthogonalize against existing Q columns.
            for k in 0..actual_rank {
                let mut dot = Complex::ZERO;
                for i in 0..rows {
                    dot += q_cols[k][i].conj() * v[i];
                }
                // Projection coefficient becomes the R(k, j) entry.
                r_data[k * cols + j] = dot;
                for i in 0..rows {
                    v[i] = v[i] - dot * q_cols[k][i];
                }
            }
            // Compute norm of residual.
            let mut norm_sq = 0.0;
            for i in 0..rows {
                norm_sq += v[i].norm_sq();
            }
            let norm = norm_sq.sqrt();
            if norm < threshold {
                // Column is (nearly) linearly dependent; skip it.
                trunc_error += norm;
                continue;
            }
            // Normalize and store.
            r_data[actual_rank * cols + j] = Complex::new(norm, 0.0);
            let inv_norm = 1.0 / norm;
            for i in 0..rows {
                v[i] = v[i] * inv_norm;
            }
            q_cols.push(v);
            actual_rank += 1;
        }
        // Ensure at least rank 1 to avoid degenerate tensors.
        if actual_rank == 0 {
            actual_rank = 1;
            q_cols.push(vec![Complex::ZERO; rows]);
            q_cols[0][0] = Complex::ONE;
            // R remains zero.
        }
        // Flatten Q: shape (rows, actual_rank)
        let mut q_flat = vec![Complex::ZERO; rows * actual_rank];
        for i in 0..rows {
            for k in 0..actual_rank {
                q_flat[i * actual_rank + k] = q_cols[k][i];
            }
        }
        // Trim R to shape (actual_rank, cols)
        let mut r_flat = vec![Complex::ZERO; actual_rank * cols];
        for k in 0..actual_rank {
            for j in 0..cols {
                r_flat[k * cols + j] = r_data[k * cols + j];
            }
        }
        (q_flat, r_flat, actual_rank, trunc_error)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// A fresh MPS must be the |0...0> product state: bond dimension 1,
    /// zero accumulated truncation error.
    #[test]
    fn test_new_product_state() {
        let mps = MpsState::new(4).unwrap();
        assert_eq!(mps.num_qubits(), 4);
        assert_eq!(mps.max_bond_dimension(), 1);
        assert_eq!(mps.truncation_error(), 0.0);
    }
    /// Constructing a zero-qubit MPS is rejected.
    #[test]
    fn test_zero_qubits_errors() {
        assert!(MpsState::new(0).is_err());
    }
    /// X on |0> yields |1>: amplitude moves entirely to the phys=1 slot.
    #[test]
    fn test_single_qubit_x_gate() {
        let mut mps = MpsState::new_with_seed(1, 42, MpsConfig::default()).unwrap();
        // X gate: flips |0> to |1>
        let x = [[Complex::ZERO, Complex::ONE], [Complex::ONE, Complex::ZERO]];
        mps.apply_single_qubit_gate(0, &x);
        // After X, tensor should have |1> = 1, |0> = 0
        let t = &mps.tensors[0];
        assert!(t.get(0, 0, 0).norm_sq() < 1e-20);
        assert!((t.get(0, 1, 0).norm_sq() - 1.0).abs() < 1e-10);
    }
    /// H on |0> yields equal 1/2 probability on both components.
    #[test]
    fn test_single_qubit_h_gate() {
        let mut mps = MpsState::new_with_seed(1, 42, MpsConfig::default()).unwrap();
        let h = std::f64::consts::FRAC_1_SQRT_2;
        let hc = Complex::new(h, 0.0);
        let h_gate = [[hc, hc], [hc, -hc]];
        mps.apply_single_qubit_gate(0, &h_gate);
        // After H|0>, both amplitudes should be 1/sqrt(2)
        let t = &mps.tensors[0];
        assert!((t.get(0, 0, 0).norm_sq() - 0.5).abs() < 1e-10);
        assert!((t.get(0, 1, 0).norm_sq() - 0.5).abs() < 1e-10);
    }
    /// H then CNOT entangles the pair, so the bond dimension must grow.
    #[test]
    fn test_cnot_creates_bell_state() {
        let mut mps = MpsState::new_with_seed(2, 42, MpsConfig::default()).unwrap();
        // Apply H to qubit 0
        let h = std::f64::consts::FRAC_1_SQRT_2;
        let hc = Complex::new(h, 0.0);
        let h_gate = [[hc, hc], [hc, -hc]];
        mps.apply_single_qubit_gate(0, &h_gate);
        // Apply CNOT(0,1)
        let c0 = Complex::ZERO;
        let c1 = Complex::ONE;
        let cnot = [
            [c1, c0, c0, c0],
            [c0, c1, c0, c0],
            [c0, c0, c0, c1],
            [c0, c0, c1, c0],
        ];
        mps.apply_two_qubit_gate(0, 1, &cnot).unwrap();
        // Bond dimension should have increased from 1 to 2
        assert!(mps.max_bond_dimension() >= 2);
    }
    /// Measuring |0> is deterministic: result 0 with probability 1.
    #[test]
    fn test_measurement_deterministic() {
        // |0> state: measuring should always give 0
        let mut mps = MpsState::new_with_seed(1, 42, MpsConfig::default()).unwrap();
        let outcome = mps.measure(0).unwrap();
        assert!(!outcome.result);
        assert!((outcome.probability - 1.0).abs() < 1e-10);
    }
    /// Unitary gates dispatched through the Gate enum return no outcomes.
    #[test]
    fn test_gate_dispatch() {
        let mut mps = MpsState::new_with_seed(2, 42, MpsConfig::default()).unwrap();
        let outcomes = mps.apply_gate(&Gate::H(0)).unwrap();
        assert!(outcomes.is_empty());
        let outcomes = mps.apply_gate(&Gate::CNOT(0, 1)).unwrap();
        assert!(outcomes.is_empty());
    }
    /// Non-adjacent operands must be routed via SWAPs without erroring.
    #[test]
    fn test_non_adjacent_two_qubit_gate() {
        let mut mps = MpsState::new_with_seed(4, 42, MpsConfig::default()).unwrap();
        // Apply CNOT between qubits 0 and 3 (non-adjacent)
        let c0 = Complex::ZERO;
        let c1 = Complex::ONE;
        let cnot = [
            [c1, c0, c0, c0],
            [c0, c1, c0, c0],
            [c0, c0, c0, c1],
            [c0, c0, c1, c0],
        ];
        // Should not error even though qubits are non-adjacent
        mps.apply_two_qubit_gate(0, 3, &cnot).unwrap();
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,263 @@
//! Core types for the ruQu quantum simulation engine
use std::fmt;
use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
/// Complex number for quantum amplitudes (f64 precision)
#[derive(Clone, Copy, PartialEq)]
pub struct Complex {
    /// Real part.
    pub re: f64,
    /// Imaginary part.
    pub im: f64,
}
impl Complex {
    /// Additive identity, 0 + 0i.
    pub const ZERO: Self = Self { re: 0.0, im: 0.0 };
    /// Multiplicative identity, 1 + 0i.
    pub const ONE: Self = Self { re: 1.0, im: 0.0 };
    /// The imaginary unit, 0 + 1i.
    pub const I: Self = Self { re: 0.0, im: 1.0 };
    /// Construct from rectangular (real, imaginary) parts.
    #[inline]
    pub fn new(re: f64, im: f64) -> Self {
        Self { re, im }
    }
    /// Construct from polar form: `r * e^(i*theta)`.
    #[inline]
    pub fn from_polar(r: f64, theta: f64) -> Self {
        Self {
            re: r * theta.cos(),
            im: r * theta.sin(),
        }
    }
    /// Squared magnitude |z|^2 = re^2 + im^2 (the measurement probability
    /// for a normalized amplitude).
    #[inline]
    pub fn norm_sq(&self) -> f64 {
        self.re * self.re + self.im * self.im
    }
    /// Magnitude |z|.
    ///
    /// Uses `f64::hypot`, which avoids the spurious overflow/underflow
    /// that `norm_sq().sqrt()` suffers when a component exceeds ~1e154
    /// or both components are denormal-small.
    #[inline]
    pub fn norm(&self) -> f64 {
        self.re.hypot(self.im)
    }
    /// Complex conjugate: (re, -im).
    #[inline]
    pub fn conj(&self) -> Self {
        Self {
            re: self.re,
            im: -self.im,
        }
    }
    /// Phase angle in radians, in (-pi, pi], computed as `atan2(im, re)`.
    #[inline]
    pub fn arg(&self) -> f64 {
        self.im.atan2(self.re)
    }
}
// ---------------------------------------------------------------------------
// Arithmetic trait implementations
// ---------------------------------------------------------------------------
/// Component-wise complex addition.
impl Add for Complex {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self::new(self.re + rhs.re, self.im + rhs.im)
    }
}
/// Component-wise complex subtraction.
impl Sub for Complex {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self::new(self.re - rhs.re, self.im - rhs.im)
    }
}
/// Complex multiplication: (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
impl Mul for Complex {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self::new(
            self.re * rhs.re - self.im * rhs.im,
            self.re * rhs.im + self.im * rhs.re,
        )
    }
}
/// Complex negation: negate both components.
impl Neg for Complex {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self::new(-self.re, -self.im)
    }
}
/// In-place addition, delegating to the `Add` impl.
impl AddAssign for Complex {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        *self = *self + rhs;
    }
}
/// In-place subtraction, delegating to the `Sub` impl.
impl SubAssign for Complex {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        *self = *self - rhs;
    }
}
/// In-place multiplication, delegating to the `Mul` impl (which reads both
/// old components before producing the new value, so no aliasing hazard).
impl MulAssign for Complex {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        *self = *self * rhs;
    }
}
impl Mul<f64> for Complex {
type Output = Self;
#[inline]
fn mul(self, rhs: f64) -> Self {
Self {
re: self.re * rhs,
im: self.im * rhs,
}
}
}
impl Mul<Complex> for f64 {
type Output = Complex;
#[inline]
fn mul(self, rhs: Complex) -> Complex {
Complex {
re: self * rhs.re,
im: self * rhs.im,
}
}
}
impl From<f64> for Complex {
#[inline]
fn from(re: f64) -> Self {
Self { re, im: 0.0 }
}
}
impl From<(f64, f64)> for Complex {
#[inline]
fn from((re, im): (f64, f64)) -> Self {
Self { re, im }
}
}
/// Debug form: a bare `(re, im)` pair, e.g. `(0.5, -0.5)`.
impl fmt::Debug for Complex {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "({}, {})", self.re, self.im)
    }
}
/// Display form: `a+bi`. For a negative imaginary part the value's own
/// minus sign acts as the separator (e.g. `1-2i`).
impl fmt::Display for Complex {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.im >= 0.0 {
            write!(f, "{}+{}i", self.re, self.im)
        } else {
            // `{}` on a negative f64 already renders the leading '-'.
            write!(f, "{}{}i", self.re, self.im)
        }
    }
}
// ---------------------------------------------------------------------------
// Quantum-domain types
// ---------------------------------------------------------------------------
/// Index of a qubit in a register
pub type QubitIndex = u32;
/// Single Pauli operator
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PauliOp {
    /// Identity.
    I,
    /// Pauli-X (bit flip).
    X,
    /// Pauli-Y.
    Y,
    /// Pauli-Z (phase flip).
    Z,
}
/// Pauli string: sparse representation of a tensor product of Pauli operators.
///
/// Only non-identity factors are stored.
#[derive(Debug, Clone, PartialEq)]
pub struct PauliString {
    pub ops: Vec<(QubitIndex, PauliOp)>,
}
impl PauliString {
    /// Build a Pauli string from explicit (qubit, operator) factors.
    pub fn new(ops: Vec<(QubitIndex, PauliOp)>) -> Self {
        Self { ops }
    }
    /// The identity string: no non-identity factors at all.
    pub fn identity() -> Self {
        Self::new(Vec::new())
    }
}
/// Hamiltonian expressed as a weighted sum of Pauli strings
#[derive(Debug, Clone)]
pub struct Hamiltonian {
    /// `(coefficient, Pauli string)` pairs: H = sum_i c_i * P_i.
    pub terms: Vec<(f64, PauliString)>,
    /// Number of qubits the Hamiltonian acts on.
    pub num_qubits: u32,
}
impl Hamiltonian {
    /// Construct from a list of weighted Pauli terms.
    pub fn new(terms: Vec<(f64, PauliString)>, num_qubits: u32) -> Self {
        Self { terms, num_qubits }
    }
}
/// Result of measuring a single qubit
#[derive(Debug, Clone)]
pub struct MeasurementOutcome {
    /// Which qubit was measured.
    pub qubit: QubitIndex,
    /// Measured bit: `false` = outcome 0, `true` = outcome 1.
    pub result: bool,
    /// Probability of the observed outcome at measurement time.
    pub probability: f64,
}
/// Aggregate metrics collected during simulation
#[derive(Debug, Clone, Default)]
pub struct SimulationMetrics {
    /// Register width of the simulated circuit.
    pub num_qubits: u32,
    /// Total number of gates applied.
    pub gate_count: usize,
    /// Wall-clock execution time in nanoseconds.
    pub execution_time_ns: u64,
    /// Peak memory usage in bytes.
    pub peak_memory_bytes: usize,
    /// Throughput figure derived from gate_count and execution time.
    pub gates_per_second: f64,
    // NOTE(review): presumably the number of gates merged by a fusion
    // optimization pass — confirm against the optimizer module.
    pub gates_fused: usize,
}
/// Noise model for realistic simulation
///
/// Each rate is a per-gate error probability; all rates default to `0.0`
/// (noiseless), so `Default` can simply be derived — every field's zero
/// default matches the previous hand-written impl exactly.
#[derive(Debug, Clone, Default)]
pub struct NoiseModel {
    /// Probability of a depolarizing error per gate.
    pub depolarizing_rate: f64,
    /// Probability of a bit-flip (X) error per gate.
    pub bit_flip_rate: f64,
    /// Probability of a phase-flip (Z) error per gate.
    pub phase_flip_rate: f64,
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,720 @@
/// Cryptographic witness logging for tamper-evident audit trails.
///
/// Each simulation execution is appended to a hash-chain: every
/// [`WitnessEntry`] includes a hash of its predecessor so that retroactive
/// tampering with any field in any entry is detectable by
/// [`WitnessLog::verify_chain`].
use crate::replay::ExecutionRecord;
use crate::types::MeasurementOutcome;
use std::collections::hash_map::DefaultHasher;
use std::fmt;
use std::hash::{Hash, Hasher};
// ---------------------------------------------------------------------------
// WitnessError
// ---------------------------------------------------------------------------
/// Errors detected during witness chain verification.
#[derive(Debug, Clone)]
pub enum WitnessError {
    /// The hash that links entry `index` to its predecessor does not match
    /// the actual hash of the preceding entry.
    BrokenChain {
        /// Index of the entry whose backward link is wrong.
        index: usize,
        /// The predecessor's actual `entry_hash`.
        expected: [u8; 32],
        /// The `prev_hash` actually stored in the entry.
        found: [u8; 32],
    },
    /// The self-hash stored in an entry does not match the recomputed hash
    /// of that entry's contents.
    InvalidHash { index: usize },
    /// Cannot verify an empty log.
    EmptyLog,
}
/// Human-readable rendering of each verification failure.
impl fmt::Display for WitnessError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            WitnessError::BrokenChain {
                index,
                expected,
                found,
            } => write!(
                f,
                "broken chain at index {}: expected prev_hash {:?}, found {:?}",
                index, expected, found
            ),
            WitnessError::InvalidHash { index } => {
                write!(f, "invalid self-hash at index {}", index)
            }
            WitnessError::EmptyLog => write!(f, "cannot verify an empty witness log"),
        }
    }
}
// Marker impl so WitnessError can be used as a boxed `dyn Error`.
impl std::error::Error for WitnessError {}
// ---------------------------------------------------------------------------
// WitnessEntry
// ---------------------------------------------------------------------------
/// A single entry in the witness hash-chain.
///
/// Each entry stores:
/// - its position in the chain (`sequence`),
/// - a backward pointer (`prev_hash`) to the preceding entry (or all-zeros
///   for the genesis entry),
/// - the execution parameters,
/// - a hash of the simulation results, and
/// - a self-hash computed over all of the above fields.
///
/// Digests are produced by this module's internal helpers built on `std`'s
/// `DefaultHasher`, so they are tamper-evident against accidental edits but
/// not cryptographically collision-resistant.
#[derive(Debug, Clone)]
pub struct WitnessEntry {
    /// Zero-based sequence number in the chain.
    pub sequence: u64,
    /// Hash of the previous entry, or `[0; 32]` for the first entry.
    pub prev_hash: [u8; 32],
    /// The execution record that was logged.
    pub execution: ExecutionRecord,
    /// Deterministic hash of the measurement outcomes.
    pub result_hash: [u8; 32],
    /// Self-hash: `H(sequence || prev_hash || execution_bytes || result_hash)`.
    pub entry_hash: [u8; 32],
}
// ---------------------------------------------------------------------------
// WitnessLog
// ---------------------------------------------------------------------------
/// Append-only, hash-chained log of simulation execution records.
///
/// Use [`append`](WitnessLog::append) to add entries and
/// [`verify_chain`](WitnessLog::verify_chain) to validate the entire chain.
/// Entries are never removed or reordered; the chain's integrity relies on
/// the vector only ever growing at the tail.
pub struct WitnessLog {
    // Private so entries can only be added through `append`.
    entries: Vec<WitnessEntry>,
}
impl WitnessLog {
/// Create a new, empty witness log.
pub fn new() -> Self {
Self {
entries: Vec::new(),
}
}
    /// Append a new entry to the log, chaining it to the previous entry.
    ///
    /// The new entry's `prev_hash` is the current tail's `entry_hash`
    /// (all-zeros for the genesis entry), and its own `entry_hash` covers
    /// every logged field, which is what makes the chain tamper-evident.
    ///
    /// Returns a reference to the newly appended entry.
    pub fn append(
        &mut self,
        execution: ExecutionRecord,
        results: &[MeasurementOutcome],
    ) -> &WitnessEntry {
        let sequence = self.entries.len() as u64;
        // Genesis entries point at the all-zero hash.
        let prev_hash = self
            .entries
            .last()
            .map(|e| e.entry_hash)
            .unwrap_or([0u8; 32]);
        let result_hash = hash_measurement_outcomes(results);
        let execution_bytes = execution_to_bytes(&execution);
        let entry_hash = compute_entry_hash(sequence, &prev_hash, &execution_bytes, &result_hash);
        self.entries.push(WitnessEntry {
            sequence,
            prev_hash,
            execution,
            result_hash,
            entry_hash,
        });
        // Safe: we just pushed, so `last()` is always Some here.
        self.entries.last().unwrap()
    }
    /// Walk the entire chain and verify that:
    /// 1. Every entry's `prev_hash` matches the preceding entry's `entry_hash`.
    /// 2. Every entry's `entry_hash` matches the recomputed hash of its contents.
    ///
    /// Returns `Ok(())` if the chain is intact, or a [`WitnessError`]
    /// describing the first inconsistency found (verification stops at the
    /// first failure).
    pub fn verify_chain(&self) -> Result<(), WitnessError> {
        if self.entries.is_empty() {
            return Err(WitnessError::EmptyLog);
        }
        for (i, entry) in self.entries.iter().enumerate() {
            // 1. Check prev_hash linkage (genesis must link to the zero hash).
            let expected_prev = if i == 0 {
                [0u8; 32]
            } else {
                self.entries[i - 1].entry_hash
            };
            if entry.prev_hash != expected_prev {
                return Err(WitnessError::BrokenChain {
                    index: i,
                    expected: expected_prev,
                    found: entry.prev_hash,
                });
            }
            // 2. Verify self-hash by recomputing it from the stored fields.
            let execution_bytes = execution_to_bytes(&entry.execution);
            let recomputed = compute_entry_hash(
                entry.sequence,
                &entry.prev_hash,
                &execution_bytes,
                &entry.result_hash,
            );
            if entry.entry_hash != recomputed {
                return Err(WitnessError::InvalidHash { index: i });
            }
        }
        Ok(())
    }
    /// Number of entries in the log.
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// Whether the log is empty.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
    /// Get an entry by zero-based index.
    pub fn get(&self, index: usize) -> Option<&WitnessEntry> {
        self.entries.get(index)
    }
    /// Borrow the full slice of entries (read-only; mutating an entry
    /// through other means would invalidate the chain).
    pub fn entries(&self) -> &[WitnessEntry] {
        &self.entries
    }
    /// Export the entire log as a JSON string.
    ///
    /// Uses a hand-rolled serialiser to avoid depending on `serde_json` in
    /// the core crate. The output is a JSON array of entry objects; all
    /// 32-byte hashes are emitted as lowercase hex strings.
    pub fn to_json(&self) -> String {
        let mut buf = String::from("[\n");
        for (i, entry) in self.entries.iter().enumerate() {
            if i > 0 {
                buf.push_str(",\n");
            }
            buf.push_str(" {\n");
            buf.push_str(&format!(" \"sequence\": {},\n", entry.sequence));
            buf.push_str(&format!(
                " \"prev_hash\": \"{}\",\n",
                hex_encode(&entry.prev_hash)
            ));
            buf.push_str(&format!(
                " \"circuit_hash\": \"{}\",\n",
                hex_encode(&entry.execution.circuit_hash)
            ));
            buf.push_str(&format!(" \"seed\": {},\n", entry.execution.seed));
            buf.push_str(&format!(
                " \"backend\": \"{}\",\n",
                entry.execution.backend
            ));
            buf.push_str(&format!(" \"shots\": {},\n", entry.execution.shots));
            buf.push_str(&format!(
                " \"software_version\": \"{}\",\n",
                entry.execution.software_version
            ));
            buf.push_str(&format!(
                " \"timestamp_utc\": {},\n",
                entry.execution.timestamp_utc
            ));
            // Noise config (null or object).
            match &entry.execution.noise_config {
                Some(nc) => {
                    buf.push_str(" \"noise_config\": {\n");
                    buf.push_str(&format!(
                        " \"depolarizing_rate\": {},\n",
                        nc.depolarizing_rate
                    ));
                    buf.push_str(&format!(" \"bit_flip_rate\": {},\n", nc.bit_flip_rate));
                    buf.push_str(&format!(
                        " \"phase_flip_rate\": {}\n",
                        nc.phase_flip_rate
                    ));
                    buf.push_str(" },\n");
                }
                None => {
                    buf.push_str(" \"noise_config\": null,\n");
                }
            }
            buf.push_str(&format!(
                " \"result_hash\": \"{}\",\n",
                hex_encode(&entry.result_hash)
            ));
            buf.push_str(&format!(
                " \"entry_hash\": \"{}\"\n",
                hex_encode(&entry.entry_hash)
            ));
            buf.push_str(" }");
        }
        buf.push_str("\n]");
        buf
    }
}
/// `Default` is an empty log, equivalent to [`WitnessLog::new`].
impl Default for WitnessLog {
    fn default() -> Self {
        Self::new()
    }
}
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------
/// Hash a byte slice using `DefaultHasher` with a deterministic seed prefix.
/// Returns a u64 digest.
///
/// Deterministic for a given build of the standard library; the seed lets
/// callers derive several independent digests from the same data.
fn hash_with_seed(data: &[u8], seed: u64) -> u64 {
    let mut state = DefaultHasher::new();
    Hash::hash(&seed, &mut state);
    Hash::hash(data, &mut state);
    state.finish()
}
/// Produce a 32-byte hash from arbitrary data by running `DefaultHasher`
/// four times with different seeds and concatenating the results.
///
/// NOTE(review): `DefaultHasher` (SipHash) is not a cryptographic hash, so
/// this digest detects accidental corruption but is not collision-resistant
/// against a deliberate adversary.
fn hash_to_32(data: &[u8]) -> [u8; 32] {
    let mut digest = [0u8; 32];
    for (seed, chunk) in digest.chunks_exact_mut(8).enumerate() {
        let word = hash_with_seed(data, seed as u64);
        chunk.copy_from_slice(&word.to_le_bytes());
    }
    digest
}
/// Deterministically hash a slice of measurement outcomes into 32 bytes.
///
/// Each outcome contributes its qubit index (LE), result bit, and
/// probability bits (LE) to the preimage, in slice order.
fn hash_measurement_outcomes(outcomes: &[MeasurementOutcome]) -> [u8; 32] {
    let mut bytes = Vec::new();
    for outcome in outcomes {
        bytes.extend_from_slice(&outcome.qubit.to_le_bytes());
        bytes.push(u8::from(outcome.result));
        bytes.extend_from_slice(&outcome.probability.to_le_bytes());
    }
    hash_to_32(&bytes)
}
/// Serialise an `ExecutionRecord` into a deterministic byte sequence.
///
/// Field order is fixed (circuit hash, seed, backend, shots, version,
/// timestamp, noise config) so the same record always produces the same
/// bytes — and therefore the same entry hash.
fn execution_to_bytes(exec: &ExecutionRecord) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.extend_from_slice(&exec.circuit_hash);
    buf.extend_from_slice(&exec.seed.to_le_bytes());
    buf.extend_from_slice(exec.backend.as_bytes());
    buf.extend_from_slice(&exec.shots.to_le_bytes());
    buf.extend_from_slice(exec.software_version.as_bytes());
    buf.extend_from_slice(&exec.timestamp_utc.to_le_bytes());
    // Presence tag (1/0) disambiguates Some from None before the rates.
    if let Some(ref nc) = exec.noise_config {
        buf.push(1);
        buf.extend_from_slice(&nc.depolarizing_rate.to_le_bytes());
        buf.extend_from_slice(&nc.bit_flip_rate.to_le_bytes());
        buf.extend_from_slice(&nc.phase_flip_rate.to_le_bytes());
    } else {
        buf.push(0);
    }
    buf
}
/// Compute the self-hash of a witness entry.
///
/// `H(sequence || prev_hash || execution_bytes || result_hash)`
fn compute_entry_hash(
    sequence: u64,
    prev_hash: &[u8; 32],
    execution_bytes: &[u8],
    result_hash: &[u8; 32],
) -> [u8; 32] {
    // Preimage layout mirrors the doc comment; all sizes are known up front.
    let mut preimage = Vec::with_capacity(8 + 32 + execution_bytes.len() + 32);
    preimage.extend_from_slice(&sequence.to_le_bytes());
    preimage.extend_from_slice(prev_hash);
    preimage.extend_from_slice(execution_bytes);
    preimage.extend_from_slice(result_hash);
    hash_to_32(&preimage)
}
/// Encode a byte slice as a lowercase hex string (two digits per byte).
fn hex_encode(bytes: &[u8]) -> String {
    use std::fmt::Write;
    let mut s = String::with_capacity(bytes.len() * 2);
    for b in bytes {
        // `write!` formats directly into `s`; the per-byte `format!` it
        // replaces allocated a temporary String on every iteration.
        // Writing to a String is infallible, so the Result is ignorable.
        let _ = write!(s, "{:02x}", b);
    }
    s
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use crate::replay::{NoiseConfig, ReplayEngine};
use crate::types::MeasurementOutcome;
/// Helper: create a minimal `ExecutionRecord` for testing.
fn make_record(seed: u64) -> ExecutionRecord {
ExecutionRecord {
circuit_hash: [seed as u8; 32],
seed,
backend: "state_vector".to_string(),
noise_config: None,
shots: 1,
software_version: "test".to_string(),
timestamp_utc: 1_700_000_000,
}
}
/// Helper: create measurement outcomes for testing.
fn make_outcomes(bits: &[bool]) -> Vec<MeasurementOutcome> {
bits.iter()
.enumerate()
.map(|(i, &b)| MeasurementOutcome {
qubit: i as u32,
result: b,
probability: if b { 0.5 } else { 0.5 },
})
.collect()
}
// -----------------------------------------------------------------------
// Empty log
// -----------------------------------------------------------------------
#[test]
fn empty_log_verification_returns_empty_error() {
let log = WitnessLog::new();
match log.verify_chain() {
Err(WitnessError::EmptyLog) => {} // expected
other => panic!("expected EmptyLog, got {:?}", other),
}
}
#[test]
fn empty_log_len_is_zero() {
let log = WitnessLog::new();
assert_eq!(log.len(), 0);
assert!(log.is_empty());
}
// -----------------------------------------------------------------------
// Single entry
// -----------------------------------------------------------------------
#[test]
fn single_entry_has_zero_prev_hash() {
let mut log = WitnessLog::new();
let record = make_record(42);
let outcomes = make_outcomes(&[true, false]);
log.append(record, &outcomes);
let entry = log.get(0).unwrap();
assert_eq!(entry.prev_hash, [0u8; 32]);
assert_eq!(entry.sequence, 0);
}
#[test]
fn single_entry_verifies() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
assert!(log.verify_chain().is_ok());
}
// -----------------------------------------------------------------------
// Two entries chained
// -----------------------------------------------------------------------
#[test]
fn two_entries_properly_chained() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
log.append(make_record(2), &make_outcomes(&[false]));
assert_eq!(log.len(), 2);
let first = log.get(0).unwrap();
let second = log.get(1).unwrap();
// Second entry's prev_hash must equal first entry's entry_hash.
assert_eq!(second.prev_hash, first.entry_hash);
assert_eq!(second.sequence, 1);
assert!(log.verify_chain().is_ok());
}
// -----------------------------------------------------------------------
// Tamper detection
// -----------------------------------------------------------------------
#[test]
fn tampering_with_seed_breaks_verification() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
log.append(make_record(2), &make_outcomes(&[false]));
// Tamper with the first entry's execution seed.
log.entries[0].execution.seed = 999;
match log.verify_chain() {
Err(WitnessError::InvalidHash { index: 0 }) => {} // expected
other => panic!("expected InvalidHash at 0, got {:?}", other),
}
}
#[test]
fn tampering_with_result_hash_breaks_verification() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
// Tamper with the result hash.
log.entries[0].result_hash = [0xff; 32];
match log.verify_chain() {
Err(WitnessError::InvalidHash { index: 0 }) => {}
other => panic!("expected InvalidHash at 0, got {:?}", other),
}
}
#[test]
fn tampering_with_prev_hash_breaks_verification() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
log.append(make_record(2), &make_outcomes(&[false]));
// Tamper with the second entry's prev_hash.
log.entries[1].prev_hash = [0xaa; 32];
match log.verify_chain() {
Err(WitnessError::BrokenChain { index: 1, .. }) => {}
other => panic!("expected BrokenChain at 1, got {:?}", other),
}
}
#[test]
fn tampering_with_entry_hash_breaks_verification() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
// Tamper with the entry hash itself.
log.entries[0].entry_hash = [0xbb; 32];
match log.verify_chain() {
Err(WitnessError::InvalidHash { index: 0 }) => {}
other => panic!("expected InvalidHash at 0, got {:?}", other),
}
}
#[test]
fn tampering_with_sequence_breaks_verification() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
log.entries[0].execution.backend = "tampered".to_string();
match log.verify_chain() {
Err(WitnessError::InvalidHash { index: 0 }) => {}
other => panic!("expected InvalidHash at 0, got {:?}", other),
}
}
// -----------------------------------------------------------------------
// JSON export
// -----------------------------------------------------------------------
#[test]
fn json_export_contains_all_entries() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
log.append(make_record(2), &make_outcomes(&[false, true]));
let json = log.to_json();
// Should contain both entries.
assert!(json.contains("\"sequence\": 0"));
assert!(json.contains("\"sequence\": 1"));
assert!(json.contains("\"seed\": 1"));
assert!(json.contains("\"seed\": 2"));
assert!(json.contains("\"backend\": \"state_vector\""));
assert!(json.contains("\"entry_hash\""));
assert!(json.contains("\"prev_hash\""));
assert!(json.contains("\"result_hash\""));
assert!(json.contains("\"software_version\": \"test\""));
}
#[test]
fn json_export_with_noise_config() {
let record = ExecutionRecord {
circuit_hash: [0; 32],
seed: 10,
backend: "state_vector".to_string(),
noise_config: Some(NoiseConfig {
depolarizing_rate: 0.01,
bit_flip_rate: 0.005,
phase_flip_rate: 0.002,
}),
shots: 100,
software_version: "test".to_string(),
timestamp_utc: 1_700_000_000,
};
let mut log = WitnessLog::new();
log.append(record, &make_outcomes(&[true]));
let json = log.to_json();
assert!(json.contains("\"depolarizing_rate\": 0.01"));
assert!(json.contains("\"bit_flip_rate\": 0.005"));
assert!(json.contains("\"phase_flip_rate\": 0.002"));
}
#[test]
fn json_export_null_noise() {
let mut log = WitnessLog::new();
log.append(make_record(5), &make_outcomes(&[false]));
let json = log.to_json();
assert!(json.contains("\"noise_config\": null"));
}
// -----------------------------------------------------------------------
// Long chain
// -----------------------------------------------------------------------
#[test]
fn chain_of_100_entries_verifies() {
let mut log = WitnessLog::new();
for i in 0..100u64 {
let outcomes = make_outcomes(&[i % 2 == 0, i % 3 == 0]);
log.append(make_record(i), &outcomes);
}
assert_eq!(log.len(), 100);
assert!(log.verify_chain().is_ok());
// Check chain linkage explicitly for a few entries.
for i in 1..100 {
let prev = log.get(i - 1).unwrap();
let curr = log.get(i).unwrap();
assert_eq!(curr.prev_hash, prev.entry_hash);
assert_eq!(curr.sequence, i as u64);
}
}
#[test]
fn tampering_middle_of_long_chain_detected() {
let mut log = WitnessLog::new();
for i in 0..10u64 {
log.append(make_record(i), &make_outcomes(&[true]));
}
// Tamper with entry 5.
log.entries[5].execution.seed = 9999;
match log.verify_chain() {
Err(WitnessError::InvalidHash { index: 5 }) => {}
other => panic!("expected InvalidHash at 5, got {:?}", other),
}
}
// -----------------------------------------------------------------------
// entries() accessor
// -----------------------------------------------------------------------
#[test]
fn entries_returns_all() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
log.append(make_record(2), &make_outcomes(&[false]));
log.append(make_record(3), &make_outcomes(&[true, false]));
let entries = log.entries();
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].sequence, 0);
assert_eq!(entries[1].sequence, 1);
assert_eq!(entries[2].sequence, 2);
}
// -----------------------------------------------------------------------
// Hash determinism
// -----------------------------------------------------------------------
#[test]
fn same_inputs_produce_same_hashes() {
let mut log1 = WitnessLog::new();
let mut log2 = WitnessLog::new();
let rec1 = make_record(42);
let rec2 = make_record(42);
let outcomes = make_outcomes(&[true, false]);
log1.append(rec1, &outcomes);
log2.append(rec2, &outcomes);
assert_eq!(
log1.get(0).unwrap().entry_hash,
log2.get(0).unwrap().entry_hash
);
assert_eq!(
log1.get(0).unwrap().result_hash,
log2.get(0).unwrap().result_hash
);
}
#[test]
fn different_results_produce_different_result_hashes() {
let mut log = WitnessLog::new();
log.append(make_record(1), &make_outcomes(&[true]));
log.append(make_record(1), &make_outcomes(&[false]));
assert_ne!(
log.get(0).unwrap().result_hash,
log.get(1).unwrap().result_hash
);
}
// -----------------------------------------------------------------------
// Integration with ReplayEngine
// -----------------------------------------------------------------------
#[test]
fn integration_with_replay_engine() {
use crate::circuit::QuantumCircuit;
use crate::simulator::{SimConfig, Simulator};
let mut circuit = QuantumCircuit::new(2);
circuit.h(0).cnot(0, 1).measure(0).measure(1);
let config = SimConfig {
seed: Some(42),
noise: None,
shots: None,
};
let engine = ReplayEngine::new();
let record = engine.record_execution(&circuit, &config, 1);
let result = Simulator::run_with_config(&circuit, &config).unwrap();
let mut log = WitnessLog::new();
log.append(record, &result.measurements);
assert_eq!(log.len(), 1);
assert!(log.verify_chain().is_ok());
let entry = log.get(0).unwrap();
assert_eq!(entry.sequence, 0);
assert_eq!(entry.prev_hash, [0u8; 32]);
}
}