Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,325 @@
//! Shared test helpers for the ruvector-solver integration test suite.
//!
//! Provides deterministic random matrix generators, dense reference solvers,
//! and floating-point comparison utilities used across all test modules.
use ruvector_solver::types::CsrMatrix;
// ---------------------------------------------------------------------------
// Random number generator (simple LCG for deterministic reproducibility)
// ---------------------------------------------------------------------------
/// A minimal linear congruential generator for deterministic test data.
///
/// Uses the Numerical Recipes LCG parameters. Not cryptographically secure,
/// but perfectly adequate for generating reproducible test matrices.
pub struct Lcg {
    state: u64,
}
impl Lcg {
    /// Create a new LCG seeded with `seed`.
    pub fn new(seed: u64) -> Self {
        Lcg { state: seed }
    }
    /// Advance the generator and return the next raw u64 value.
    pub fn next_u64(&mut self) -> u64 {
        let next = self
            .state
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        self.state = next;
        next
    }
    /// Generate a uniform f64 in [0, 1).
    pub fn next_f64(&mut self) -> f64 {
        // Keep the top 53 bits so the result fits exactly in an f64 mantissa.
        let mantissa_bits = self.next_u64() >> 11;
        mantissa_bits as f64 / (1u64 << 53) as f64
    }
    /// Generate a uniform f64 in [lo, hi).
    pub fn next_f64_range(&mut self, lo: f64, hi: f64) -> f64 {
        let t = self.next_f64();
        lo + (hi - lo) * t
    }
}
// ---------------------------------------------------------------------------
// Matrix generators
// ---------------------------------------------------------------------------
/// Generate a random diagonally dominant CSR matrix of dimension `n`.
///
/// Each row has approximately `density * n` non-zero off-diagonal entries
/// (at least 1). The diagonal entry is set to `1 + sum_of_abs_off_diag`
/// to guarantee strict diagonal dominance.
///
/// The resulting matrix is suitable for Neumann and Jacobi solvers.
pub fn random_diag_dominant_csr(n: usize, density: f64, seed: u64) -> CsrMatrix<f64> {
    let mut rng = Lcg::new(seed);
    let mut triplets: Vec<(usize, usize, f64)> = Vec::new();
    for row in 0..n {
        let mut abs_off_diag = 0.0f64;
        for col in 0..n {
            if row == col {
                continue;
            }
            // Include each off-diagonal position with probability `density`.
            if rng.next_f64() < density {
                let value = rng.next_f64_range(-1.0, 1.0);
                triplets.push((row, col, value));
                abs_off_diag += value.abs();
            }
        }
        // Ensure at least one off-diagonal entry per row for non-trivial testing.
        if abs_off_diag == 0.0 && n > 1 {
            let col = (row + 1) % n;
            let value = rng.next_f64_range(0.1, 0.5);
            triplets.push((row, col, value));
            abs_off_diag = value;
        }
        // Strict dominance: diagonal exceeds the absolute off-diagonal sum.
        let diagonal = abs_off_diag + 1.0 + rng.next_f64();
        triplets.push((row, row, diagonal));
    }
    CsrMatrix::<f64>::from_coo(n, n, triplets)
}
/// Generate a random graph Laplacian CSR matrix of dimension `n`.
///
/// A graph Laplacian `L = D - A` where:
/// - `A` is the adjacency matrix of a random undirected graph.
/// - `D` is the degree matrix.
/// - Each row sums to zero (L * ones = 0).
///
/// The resulting matrix is symmetric positive semi-definite.
pub fn random_laplacian_csr(n: usize, density: f64, seed: u64) -> CsrMatrix<f64> {
    let mut rng = Lcg::new(seed);
    // Random symmetric adjacency: each pair (u, v) with u < v receives an
    // edge with probability `density`.
    let mut adjacency = vec![vec![0.0f64; n]; n];
    for u in 0..n {
        for v in (u + 1)..n {
            if rng.next_f64() < density {
                let weight = rng.next_f64_range(0.1, 2.0);
                adjacency[u][v] = weight;
                adjacency[v][u] = weight;
            }
        }
    }
    // Guarantee connectivity by threading the path 0-1-2-...-(n-1).
    for u in 0..n.saturating_sub(1) {
        if adjacency[u][u + 1] == 0.0 {
            let weight = rng.next_f64_range(0.1, 1.0);
            adjacency[u][u + 1] = weight;
            adjacency[u + 1][u] = weight;
        }
    }
    // Assemble the Laplacian: off-diagonal entries are -A[u][v]; each
    // diagonal entry is the weighted degree, so every row sums to zero.
    let mut triplets: Vec<(usize, usize, f64)> = Vec::new();
    for u in 0..n {
        let mut degree = 0.0f64;
        for v in 0..n {
            if u != v && adjacency[u][v] != 0.0 {
                triplets.push((u, v, -adjacency[u][v]));
                degree += adjacency[u][v];
            }
        }
        triplets.push((u, u, degree));
    }
    CsrMatrix::<f64>::from_coo(n, n, triplets)
}
/// Generate a random SPD (symmetric positive definite) matrix.
///
/// Constructs `A = B^T B + epsilon * I` where `B` has random entries,
/// guaranteeing positive definiteness.
///
/// `density` controls how many off-diagonal positions of the intermediate
/// symmetric matrix `B` receive a random value; the final product `B^T B`
/// is typically much denser. A fixed `seed` makes the result deterministic.
pub fn random_spd_csr(n: usize, density: f64, seed: u64) -> CsrMatrix<f64> {
let mut rng = Lcg::new(seed);
// Build a random dense matrix B, then compute A = B^T B + eps * I.
// For efficiency with CSR, we do this differently: build a sparse
// symmetric matrix and add a diagonal shift.
let mut dense = vec![vec![0.0f64; n]; n];
for i in 0..n {
for j in i..n {
// Diagonal positions always receive a value; off-diagonals only with
// probability `density`. The short-circuit matters: for off-diagonals
// the RNG is consumed once for the inclusion test and once for the
// value, so results depend on this exact iteration order.
if i == j || rng.next_f64() < density {
let val = rng.next_f64_range(-1.0, 1.0);
dense[i][j] += val;
if i != j {
// Mirror to keep the intermediate matrix symmetric.
dense[j][i] += val;
}
}
}
}
// Compute A = M^T M where M = dense (makes it PSD).
let mut a = vec![vec![0.0f64; n]; n];
for i in 0..n {
for j in 0..n {
let mut sum = 0.0;
for k in 0..n {
sum += dense[k][i] * dense[k][j];
}
a[i][j] = sum;
}
}
// Add diagonal shift to ensure positive definiteness.
// (B^T B alone is only guaranteed positive SEMI-definite.)
for i in 0..n {
a[i][i] += 1.0;
}
// Convert to COO.
// Entries at floating-point noise level are dropped to keep the CSR sparse.
let mut entries: Vec<(usize, usize, f64)> = Vec::new();
for i in 0..n {
for j in 0..n {
if a[i][j].abs() > 1e-15 {
entries.push((i, j, a[i][j]));
}
}
}
CsrMatrix::<f64>::from_coo(n, n, entries)
}
/// Generate a deterministic random vector of length `n`.
pub fn random_vector(n: usize, seed: u64) -> Vec<f64> {
    let mut rng = Lcg::new(seed);
    let mut out = Vec::with_capacity(n);
    for _ in 0..n {
        // Entries uniform in [-1, 1), reproducible for a fixed seed.
        out.push(rng.next_f64_range(-1.0, 1.0));
    }
    out
}
/// Build a simple undirected graph as a CSR adjacency matrix.
///
/// Each entry `(u, v)` in `edges` creates entries `A[u][v] = 1` and
/// `A[v][u] = 1`.
pub fn adjacency_from_edges(n: usize, edges: &[(usize, usize)]) -> CsrMatrix<f64> {
    let mut triplets: Vec<(usize, usize, f64)> = Vec::with_capacity(edges.len() * 2);
    for &(u, v) in edges.iter() {
        triplets.push((u, v, 1.0));
        // Self-loops are stored once; every other edge is mirrored.
        if u != v {
            triplets.push((v, u, 1.0));
        }
    }
    CsrMatrix::<f64>::from_coo(n, n, triplets)
}
// ---------------------------------------------------------------------------
// Dense reference solver
// ---------------------------------------------------------------------------
/// Solve `Ax = b` using dense Gaussian elimination with partial pivoting.
///
/// This is an O(n^3) reference solver used only for small test problems
/// to verify iterative solver accuracy.
///
/// # Panics
///
/// Panics if the matrix is singular or dimensions are inconsistent.
pub fn dense_solve(matrix: &CsrMatrix<f64>, rhs: &[f64]) -> Vec<f64> {
let n = matrix.rows;
assert_eq!(n, matrix.cols, "dense_solve requires a square matrix");
assert_eq!(rhs.len(), n, "rhs length must match matrix dimension");
// Convert CSR to dense augmented matrix [A | b].
// NOTE(review): values are assigned (not accumulated), which assumes
// `from_coo` leaves at most one stored value per (row, col) — confirm.
let mut aug = vec![vec![0.0f64; n + 1]; n];
for i in 0..n {
aug[i][n] = rhs[i];
let start = matrix.row_ptr[i];
let end = matrix.row_ptr[i + 1];
for idx in start..end {
let j = matrix.col_indices[idx];
aug[i][j] = matrix.values[idx];
}
}
// Forward elimination with partial pivoting.
for col in 0..n {
// Find pivot.
// Partial pivoting: choose the row with the largest absolute value in
// this column (at or below the diagonal) for numerical stability.
let mut max_row = col;
let mut max_val = aug[col][col].abs();
for row in (col + 1)..n {
if aug[row][col].abs() > max_val {
max_val = aug[row][col].abs();
max_row = row;
}
}
assert!(max_val > 1e-15, "matrix is singular or near-singular");
aug.swap(col, max_row);
// Eliminate.
// Subtract a multiple of the pivot row from each row below so that
// column `col` becomes zero beneath the diagonal; the update runs
// through index n inclusive, carrying the rhs column along.
let pivot = aug[col][col];
for row in (col + 1)..n {
let factor = aug[row][col] / pivot;
for j in col..=n {
aug[row][j] -= factor * aug[col][j];
}
}
}
// Back substitution.
// The system is now upper triangular; solve from the last row upward.
let mut x = vec![0.0f64; n];
for i in (0..n).rev() {
let mut sum = aug[i][n];
for j in (i + 1)..n {
sum -= aug[i][j] * x[j];
}
x[i] = sum / aug[i][i];
}
x
}
// ---------------------------------------------------------------------------
// Floating-point comparison utilities
// ---------------------------------------------------------------------------
/// Compute the L2 norm of a vector.
pub fn l2_norm(v: &[f64]) -> f64 {
    let mut sum_sq = 0.0f64;
    for &x in v {
        sum_sq += x * x;
    }
    sum_sq.sqrt()
}
/// Compute the L2 distance between two vectors.
pub fn l2_distance(a: &[f64], b: &[f64]) -> f64 {
    assert_eq!(a.len(), b.len(), "vectors must have same length");
    let mut sum_sq = 0.0f64;
    for (&x, &y) in a.iter().zip(b.iter()) {
        let diff = x - y;
        sum_sq += diff * diff;
    }
    sum_sq.sqrt()
}
/// Compute the relative error ||approx - exact|| / ||exact||.
///
/// Returns absolute error if the exact solution has zero norm.
pub fn relative_error(approx: &[f64], exact: &[f64]) -> f64 {
    let error = l2_distance(approx, exact);
    let denom = l2_norm(exact);
    // Guard against dividing by a (near-)zero exact norm.
    if denom > 1e-15 {
        error / denom
    } else {
        error
    }
}
/// Compute the residual `b - A*x` for a sparse system.
pub fn compute_residual(matrix: &CsrMatrix<f64>, x: &[f64], rhs: &[f64]) -> Vec<f64> {
    let n = matrix.rows;
    let mut ax = vec![0.0f64; n];
    matrix.spmv(x, &mut ax);
    let mut residual = Vec::with_capacity(n);
    for i in 0..n {
        residual.push(rhs[i] - ax[i]);
    }
    residual
}
/// Convert an f32 solution vector to f64 for comparison.
pub fn f32_to_f64(v: &[f32]) -> Vec<f64> {
    let mut out = Vec::with_capacity(v.len());
    for &x in v {
        out.push(f64::from(x));
    }
    out
}

View File

@@ -0,0 +1,249 @@
//! Integration tests for the Conjugate Gradient (CG) solver.
//!
//! Tests cover correctness on SPD systems, Laplacian solves, preconditioning
//! benefits, known-solution verification, and tolerance scaling.
mod helpers;
use approx::assert_relative_eq;
use ruvector_solver::cg::ConjugateGradientSolver;
use ruvector_solver::traits::SolverEngine;
use ruvector_solver::types::{Algorithm, ComputeBudget, CsrMatrix};
use helpers::{
compute_residual, dense_solve, f32_to_f64, l2_norm, random_laplacian_csr, random_spd_csr,
random_vector, relative_error,
};
// ---------------------------------------------------------------------------
// Helper: default compute budget
// ---------------------------------------------------------------------------
/// Build the shared compute budget used by every CG test: a generous
/// 30-second wall-clock cap and 10k iterations, with a tolerance tighter
/// than any per-solver tolerance so the solver settings (not the budget)
/// decide convergence.
fn default_budget() -> ComputeBudget {
ComputeBudget {
max_time: std::time::Duration::from_secs(30),
max_iterations: 10_000,
tolerance: 1e-12,
}
}
// ---------------------------------------------------------------------------
// SPD system: solve and verify convergence
// ---------------------------------------------------------------------------
#[test]
fn test_cg_spd_system() {
    // Solve a random 15x15 SPD system and verify convergence three ways:
    // solver-reported residual, independently recomputed residual, and a
    // comparison against a dense reference solve.
    let n = 15;
    let matrix = random_spd_csr(n, 0.4, 42);
    let rhs = random_vector(n, 43);
    let budget = default_budget();
    let solver = ConjugateGradientSolver::new(1e-8, 500, false);
    let result = solver.solve(&matrix, &rhs, &budget).unwrap();
    assert_eq!(result.algorithm, Algorithm::CG);
    assert!(
        result.residual_norm < 1e-4,
        "residual too large: {}",
        result.residual_norm
    );
    // Recompute the residual from scratch rather than trusting the solver.
    let solution = f32_to_f64(&result.solution);
    let resid_norm = l2_norm(&compute_residual(&matrix, &solution, &rhs));
    assert!(
        resid_norm < 1e-3,
        "independent residual check: {}",
        resid_norm
    );
    // Cross-check against the O(n^3) dense reference solver.
    let exact = dense_solve(&matrix, &rhs);
    let rel_err = relative_error(&solution, &exact);
    assert!(rel_err < 1e-2, "relative error vs dense solve: {}", rel_err);
}
// ---------------------------------------------------------------------------
// Graph Laplacian system
// ---------------------------------------------------------------------------
#[test]
fn test_cg_laplacian() {
    let n = 12;
    let laplacian = random_laplacian_csr(n, 0.3, 44);
    // Laplacians are singular (L * ones = 0), so shift the diagonal by a
    // small epsilon to obtain an SPD system A = L + epsilon * I.
    let epsilon = 0.01;
    let mut shifted: Vec<(usize, usize, f64)> = Vec::new();
    for row in 0..n {
        for idx in laplacian.row_ptr[row]..laplacian.row_ptr[row + 1] {
            let col = laplacian.col_indices[idx];
            let value = if row == col {
                laplacian.values[idx] + epsilon
            } else {
                laplacian.values[idx]
            };
            shifted.push((row, col, value));
        }
    }
    let reg_laplacian = CsrMatrix::<f64>::from_coo(n, n, shifted);
    let rhs = random_vector(n, 45);
    let budget = default_budget();
    let solver = ConjugateGradientSolver::new(1e-8, 1000, false);
    let result = solver.solve(&reg_laplacian, &rhs, &budget).unwrap();
    assert!(
        result.residual_norm < 1e-4,
        "laplacian solve residual: {}",
        result.residual_norm
    );
    // Independently confirm that A x is close to b.
    let x = f32_to_f64(&result.solution);
    let resid_norm = l2_norm(&compute_residual(&reg_laplacian, &x, &rhs));
    assert!(
        resid_norm < 1e-3,
        "laplacian residual check: {}",
        resid_norm
    );
}
// ---------------------------------------------------------------------------
// Preconditioned CG reduces iterations
// ---------------------------------------------------------------------------
#[test]
fn test_cg_preconditioned() {
    let n = 30;
    let matrix = random_spd_csr(n, 0.3, 46);
    let rhs = random_vector(n, 47);
    let budget = default_budget();
    // Identical solvers except for the preconditioning flag.
    let without_precond = ConjugateGradientSolver::new(1e-8, 1000, false);
    let with_precond = ConjugateGradientSolver::new(1e-8, 1000, true);
    let result_no = without_precond.solve(&matrix, &rhs, &budget).unwrap();
    let result_yes = with_precond.solve(&matrix, &rhs, &budget).unwrap();
    // Both variants must converge on their own.
    assert!(
        result_no.residual_norm < 1e-4,
        "unpreconditioned residual: {}",
        result_no.residual_norm
    );
    assert!(
        result_yes.residual_norm < 1e-4,
        "preconditioned residual: {}",
        result_yes.residual_norm
    );
    // Preconditioning should not cost iterations. A slack of 2 is allowed
    // because on well-conditioned systems it may not strictly help.
    assert!(
        result_yes.iterations <= result_no.iterations + 2,
        "preconditioned ({}) should not take much more than unpreconditioned ({})",
        result_yes.iterations,
        result_no.iterations
    );
}
// ---------------------------------------------------------------------------
// Known solution verification
// ---------------------------------------------------------------------------
#[test]
// Verify CG against two systems whose exact solutions are known in closed
// form: a pure diagonal system and a symmetric tridiagonal system.
fn test_cg_known_solution() {
// Diagonal system D*x = b => x_i = b_i / d_i
let diag_vals = vec![2.0, 5.0, 10.0, 1.0];
let n = diag_vals.len();
let entries: Vec<(usize, usize, f64)> = diag_vals
.iter()
.enumerate()
.map(|(i, &d)| (i, i, d))
.collect();
let matrix = CsrMatrix::<f64>::from_coo(n, n, entries);
let rhs = vec![4.0, 15.0, 30.0, 7.0];
let expected = vec![2.0, 3.0, 3.0, 7.0]; // b_i / d_i
let budget = default_budget();
// Tight tolerance (1e-10); the same solver instance is reused below.
let solver = ConjugateGradientSolver::new(1e-10, 100, false);
let result = solver.solve(&matrix, &rhs, &budget).unwrap();
let x = f32_to_f64(&result.solution);
// Epsilon of 1e-4 accommodates the f32 solution vector.
for i in 0..n {
assert_relative_eq!(x[i], expected[i], epsilon = 1e-4);
}
// Also test a tridiagonal system with known answer.
// A = [4 -1 0] b = [3] => solve manually:
// [-1 4 -1] [2] x0 = (3 + x1)/4
// [0 -1 4] [3] x2 = (3 + x1)/4 => x0 = x2 (by symmetry)
// x1 = (2 + x0 + x2)/4 = (2 + 2*x0)/4
// From row 0: x0 = (3 + x1)/4
// From row 1: x1 = (2 + 2*x0)/4 = (1 + x0)/2
// Sub: x0 = (3 + (1+x0)/2)/4 = (3.5 + x0/2)/4 = 7/8 + x0/8
// => 7x0/8 = 7/8 => x0 = 1, x1 = 1, x2 = 1
let tri = CsrMatrix::<f64>::from_coo(
3,
3,
vec![
(0, 0, 4.0),
(0, 1, -1.0),
(1, 0, -1.0),
(1, 1, 4.0),
(1, 2, -1.0),
(2, 1, -1.0),
(2, 2, 4.0),
],
);
let rhs_tri = vec![3.0, 2.0, 3.0];
let result_tri = solver.solve(&tri, &rhs_tri, &budget).unwrap();
let x_tri = f32_to_f64(&result_tri.solution);
// All three components equal 1 per the derivation above.
for i in 0..3 {
assert_relative_eq!(x_tri[i], 1.0, epsilon = 1e-4);
}
}
// ---------------------------------------------------------------------------
// Tolerance levels: accuracy scales with epsilon
// ---------------------------------------------------------------------------
#[test]
fn test_cg_tolerance_levels() {
    let n = 20;
    let matrix = random_spd_csr(n, 0.3, 48);
    let rhs = random_vector(n, 49);
    let exact = dense_solve(&matrix, &rhs);
    let budget = default_budget();
    // Sweep progressively tighter tolerances; achieved accuracy should
    // roughly track the requested tolerance.
    let mut prev_error = f64::INFINITY;
    for &tol in &[1e-4, 1e-6, 1e-8, 1e-10] {
        let solver = ConjugateGradientSolver::new(tol, 5000, false);
        let result = solver.solve(&matrix, &rhs, &budget).unwrap();
        let rel_err = relative_error(&f32_to_f64(&result.solution), &exact);
        // Accept either an error comfortably under a scaled tolerance bound
        // or one that did not regress much against the looser previous run;
        // the slack absorbs f32 precision limits at very tight tolerances.
        assert!(
            rel_err < tol.sqrt() * 100.0 || rel_err < prev_error * 10.0,
            "tol={:.0e}: relative error {:.2e} is too large (prev={:.2e})",
            tol,
            rel_err,
            prev_error
        );
        prev_error = rel_err;
    }
}

View File

@@ -0,0 +1,325 @@
//! Integration tests for `CsrMatrix` — construction, SpMV, transpose, and
//! structural queries.
mod helpers;
use approx::assert_relative_eq;
use ruvector_solver::types::CsrMatrix;
// ---------------------------------------------------------------------------
// Construction from COO triplets
// ---------------------------------------------------------------------------
#[test]
fn test_csr_from_triplets() {
    // 3x3 matrix:
    // [ 2 -1 0 ]
    // [-1 3 -1 ]
    // [ 0 -1 2 ]
    let mat = CsrMatrix::<f64>::from_coo(
        3,
        3,
        vec![
            (0, 0, 2.0),
            (0, 1, -1.0),
            (1, 0, -1.0),
            (1, 1, 3.0),
            (1, 2, -1.0),
            (2, 1, -1.0),
            (2, 2, 2.0),
        ],
    );
    // Shape and storage sizes.
    assert_eq!(mat.rows, 3);
    assert_eq!(mat.cols, 3);
    assert_eq!(mat.values.len(), 7);
    assert_eq!(mat.col_indices.len(), 7);
    assert_eq!(mat.row_ptr.len(), 4); // rows + 1
    // row_ptr must hold cumulative entry counts: 2, 3, 2 entries per row.
    assert_eq!(mat.row_ptr[0], 0);
    assert_eq!(mat.row_ptr[1], 2);
    assert_eq!(mat.row_ptr[2], 5);
    assert_eq!(mat.row_ptr[3], 7);
    // row_degree reports the same per-row counts.
    assert_eq!(mat.row_degree(0), 2);
    assert_eq!(mat.row_degree(1), 3);
    assert_eq!(mat.row_degree(2), 2);
}
// ---------------------------------------------------------------------------
// SpMV correctness vs dense multiply
// ---------------------------------------------------------------------------
#[test]
fn test_csr_spmv() {
    // A = [ 2 -1 0 ] — each row sums to 1, so A * ones = ones.
    // [-1 3 -1 ]
    // [ 0 -1 2 ]
    let mat = CsrMatrix::<f64>::from_coo(
        3,
        3,
        vec![
            (0, 0, 2.0),
            (0, 1, -1.0),
            (1, 0, -1.0),
            (1, 1, 3.0),
            (1, 2, -1.0),
            (2, 1, -1.0),
            (2, 2, 2.0),
        ],
    );
    let ones = vec![1.0, 1.0, 1.0];
    let mut y = vec![0.0f64; 3];
    mat.spmv(&ones, &mut y);
    for i in 0..3 {
        assert_relative_eq!(y[i], 1.0, epsilon = 1e-12);
    }
    // Non-trivial input: A * [1, 2, 3] = [0, 2, 4] (computed by hand).
    let x2 = vec![1.0, 2.0, 3.0];
    let mut y2 = vec![0.0f64; 3];
    mat.spmv(&x2, &mut y2);
    let expected = [0.0, 2.0, 4.0];
    for i in 0..3 {
        assert_relative_eq!(y2[i], expected[i], epsilon = 1e-12);
    }
}
// ---------------------------------------------------------------------------
// Density calculation
// ---------------------------------------------------------------------------
#[test]
fn test_csr_density() {
    // 4x4 matrix with 6 stored entries: the full diagonal plus (0,1), (1,0).
    let mat = CsrMatrix::<f64>::from_coo(
        4,
        4,
        vec![
            (0, 0, 1.0),
            (1, 1, 2.0),
            (2, 2, 3.0),
            (3, 3, 4.0),
            (0, 1, 0.5),
            (1, 0, 0.5),
        ],
    );
    let nnz = mat.values.len();
    assert_eq!(nnz, 6);
    // Density = stored entries / total positions.
    let density = nnz as f64 / (mat.rows * mat.cols) as f64;
    assert_relative_eq!(density, 6.0 / 16.0, epsilon = 1e-12);
}
// ---------------------------------------------------------------------------
// Transpose correctness
// ---------------------------------------------------------------------------
#[test]
fn test_csr_transpose() {
    // Non-symmetric matrix:
    // A = [ 1 2 0 ] A^T = [ 1 0 3 ]
    // [ 0 3 0 ] [ 2 3 0 ]
    // [ 3 0 4 ] [ 0 0 4 ]
    let mat = CsrMatrix::<f64>::from_coo(
        3,
        3,
        vec![
            (0, 0, 1.0),
            (0, 1, 2.0),
            (1, 1, 3.0),
            (2, 0, 3.0),
            (2, 2, 4.0),
        ],
    );
    let at = mat.transpose();
    // The transpose keeps the shape (square here) and the entry count.
    assert_eq!(at.rows, 3);
    assert_eq!(at.cols, 3);
    assert_eq!(at.values.len(), 5);
    // A^T * e_0 extracts column 0 of A^T, which is row 0 of A: [1, 2, 0].
    let e0 = vec![1.0, 0.0, 0.0];
    let mut col0 = vec![0.0f64; 3];
    at.spmv(&e0, &mut col0);
    let expected = [1.0, 2.0, 0.0];
    for i in 0..3 {
        assert_relative_eq!(col0[i], expected[i], epsilon = 1e-12);
    }
    // (A^T)^T must act exactly like A on an arbitrary vector.
    let att = at.transpose();
    let probe = vec![1.0, 2.0, 3.0];
    let mut via_a = vec![0.0f64; 3];
    let mut via_att = vec![0.0f64; 3];
    mat.spmv(&probe, &mut via_a);
    att.spmv(&probe, &mut via_att);
    for i in 0..3 {
        assert_relative_eq!(via_a[i], via_att[i], epsilon = 1e-12);
    }
}
// ---------------------------------------------------------------------------
// Empty matrix handling
// ---------------------------------------------------------------------------
#[test]
fn test_csr_empty() {
    // A 0x0 matrix is valid and carries a single sentinel row pointer.
    let mat = CsrMatrix::<f64>::from_coo(0, 0, Vec::<(usize, usize, f64)>::new());
    assert_eq!(mat.rows, 0);
    assert_eq!(mat.cols, 0);
    assert_eq!(mat.values.len(), 0);
    assert_eq!(mat.row_ptr.len(), 1); // [0]
    assert_eq!(mat.row_ptr[0], 0);
    // SpMV on the empty matrix is a trivial no-op.
    let x: Vec<f64> = Vec::new();
    let mut y: Vec<f64> = Vec::new();
    mat.spmv(&x, &mut y);
    // Transposing the empty matrix yields another empty matrix.
    let at = mat.transpose();
    assert_eq!(at.rows, 0);
    assert_eq!(at.cols, 0);
    // A 3x3 matrix with zero stored entries maps every vector to zero.
    let mat2 = CsrMatrix::<f64>::from_coo(3, 3, Vec::<(usize, usize, f64)>::new());
    assert_eq!(mat2.values.len(), 0);
    let mut y2 = vec![0.0f64; 3];
    mat2.spmv(&[1.0, 2.0, 3.0], &mut y2);
    for &v in y2.iter() {
        assert_relative_eq!(v, 0.0, epsilon = 1e-15);
    }
}
// ---------------------------------------------------------------------------
// Identity matrix: SpMV(I, x) = x
// ---------------------------------------------------------------------------
#[test]
fn test_csr_identity() {
    // I * x = x must hold across a range of sizes.
    for n in [1, 5, 20, 100] {
        let identity = CsrMatrix::<f64>::identity(n);
        assert_eq!(identity.rows, n);
        assert_eq!(identity.cols, n);
        // Exactly one stored entry per row.
        assert_eq!(identity.values.len(), n);
        // Deterministic, non-trivial probe vector.
        let x: Vec<f64> = (0..n).map(|i| (i as f64 + 1.0) * 0.7).collect();
        let mut y = vec![0.0f64; n];
        identity.spmv(&x, &mut y);
        for (yi, xi) in y.iter().zip(x.iter()) {
            assert_relative_eq!(*yi, *xi, epsilon = 1e-12);
        }
    }
}
// ---------------------------------------------------------------------------
// Diagonal matrix correctness
// ---------------------------------------------------------------------------
#[test]
fn test_csr_diagonal() {
    let diag_vals = [2.0, 3.0, 5.0, 7.0];
    let n = diag_vals.len();
    let triplets: Vec<(usize, usize, f64)> = diag_vals
        .iter()
        .enumerate()
        .map(|(i, &v)| (i, i, v))
        .collect();
    let mat = CsrMatrix::<f64>::from_coo(n, n, triplets);
    // D * x is the element-wise product of the diagonal and x.
    let x = vec![1.0, 2.0, 3.0, 4.0];
    let mut y = vec![0.0f64; n];
    mat.spmv(&x, &mut y);
    for i in 0..n {
        assert_relative_eq!(y[i], diag_vals[i] * x[i], epsilon = 1e-12);
    }
    // Exactly one stored entry per row.
    for i in 0..n {
        assert_eq!(mat.row_degree(i), 1);
    }
    // A diagonal matrix equals its own transpose.
    let dt = mat.transpose();
    let mut yt = vec![0.0f64; n];
    dt.spmv(&x, &mut yt);
    for i in 0..n {
        assert_relative_eq!(y[i], yt[i], epsilon = 1e-12);
    }
}
// ---------------------------------------------------------------------------
// Symmetric detection (structural)
// ---------------------------------------------------------------------------
#[test]
fn test_csr_symmetric() {
    // Structurally and numerically symmetric matrix.
    let sym = CsrMatrix::<f64>::from_coo(
        3,
        3,
        vec![
            (0, 0, 2.0),
            (0, 1, 1.0),
            (1, 0, 1.0),
            (1, 1, 3.0),
            (1, 2, 1.0),
            (2, 1, 1.0),
            (2, 2, 2.0),
        ],
    );
    // For a symmetric matrix, A and A^T act identically on any vector.
    let at = sym.transpose();
    let probe = vec![1.0, 2.0, 3.0];
    let mut y_a = vec![0.0f64; 3];
    let mut y_at = vec![0.0f64; 3];
    sym.spmv(&probe, &mut y_a);
    at.spmv(&probe, &mut y_at);
    for i in 0..3 {
        assert_relative_eq!(y_a[i], y_at[i], epsilon = 1e-12);
    }
    // The orchestrator's sparsity analysis should flag the structure.
    let profile = ruvector_solver::router::SolverOrchestrator::analyze_sparsity(&sym);
    assert!(
        profile.is_symmetric_structure,
        "symmetric matrix should be detected as symmetric"
    );
    // A matrix with a (0, 1) entry but no matching (1, 0) entry is
    // structurally asymmetric.
    let asym = CsrMatrix::<f64>::from_coo(
        3,
        3,
        vec![
            (0, 0, 1.0),
            (0, 1, 2.0),
            (1, 1, 3.0),
            (2, 2, 4.0),
        ],
    );
    let profile_asym = ruvector_solver::router::SolverOrchestrator::analyze_sparsity(&asym);
    assert!(
        !profile_asym.is_symmetric_structure,
        "asymmetric matrix should not be detected as symmetric"
    );
}

View File

@@ -0,0 +1,234 @@
//! Integration tests for the Neumann series solver.
//!
//! The Neumann solver solves Ax = b by iterating x_{k+1} = b + (I - A) x_k.
//! Convergence requires spectral radius rho(I - A) < 1, which is guaranteed
//! for diagonally dominant systems.
mod helpers;
use approx::assert_relative_eq;
use ruvector_solver::error::SolverError;
use ruvector_solver::neumann::NeumannSolver;
use ruvector_solver::traits::SolverEngine;
use ruvector_solver::types::{Algorithm, ComputeBudget, CsrMatrix};
use helpers::{
compute_residual, dense_solve, f32_to_f64, l2_norm, random_diag_dominant_csr, random_vector,
relative_error,
};
// ---------------------------------------------------------------------------
// Helper: call solver via the SolverEngine trait (f64 interface)
// ---------------------------------------------------------------------------
/// Dispatch a solve through the `SolverEngine` trait method so the tests
/// exercise the generic trait entry point.
///
/// NOTE(review): the `SolverEngine::solve(...)` path form presumably
/// disambiguates from an inherent `NeumannSolver::solve` — confirm against
/// the solver's impl before simplifying to a plain method call.
fn solve_via_trait(
solver: &NeumannSolver,
matrix: &CsrMatrix<f64>,
rhs: &[f64],
budget: &ComputeBudget,
) -> Result<ruvector_solver::types::SolverResult, SolverError> {
SolverEngine::solve(solver, matrix, rhs, budget)
}
// ---------------------------------------------------------------------------
// Solve a diagonally dominant system, verify ||Ax - b|| < eps
// ---------------------------------------------------------------------------
#[test]
fn test_neumann_diagonal_dominant() {
    // Diagonal dominance guarantees the Neumann iteration converges.
    let n = 20;
    let matrix = random_diag_dominant_csr(n, 0.3, 42);
    let rhs = random_vector(n, 43);
    let budget = ComputeBudget::default();
    let solver = NeumannSolver::new(1e-8, 500);
    let result = solve_via_trait(&solver, &matrix, &rhs, &budget).unwrap();
    assert!(
        result.residual_norm < 1e-6,
        "residual too large: {}",
        result.residual_norm
    );
    // Double-check by computing residual independently.
    let x = f32_to_f64(&result.solution);
    let resid_norm = l2_norm(&compute_residual(&matrix, &x, &rhs));
    assert!(
        resid_norm < 1e-4,
        "independent residual check failed: {}",
        resid_norm
    );
    // Cross-check against the dense Gaussian-elimination reference.
    let exact = dense_solve(&matrix, &rhs);
    let rel_err = relative_error(&x, &exact);
    assert!(rel_err < 1e-3, "relative error vs dense solve: {}", rel_err);
}
// ---------------------------------------------------------------------------
// Verify geometric convergence rate
// ---------------------------------------------------------------------------
#[test]
// Verify that the recorded convergence history behaves geometrically:
// residuals mostly shrink from step to step, and the average contraction
// factor over the final iterations is below 1.
fn test_neumann_convergence_rate() {
let n = 15;
let matrix = random_diag_dominant_csr(n, 0.2, 44);
let rhs = random_vector(n, 45);
let budget = ComputeBudget::default();
let solver = NeumannSolver::new(1e-12, 500);
let result = solve_via_trait(&solver, &matrix, &rhs, &budget).unwrap();
// The convergence history should show monotonic decrease (geometric).
let history = &result.convergence_history;
assert!(
history.len() >= 3,
"need at least 3 iterations for rate check"
);
// Check that residual decreases monotonically for at least the first
// several iterations (allowing a small tolerance for floating point).
// The 1.01 factor counts a step as "decreasing" even if the residual
// grows by up to 1%, tolerating floating-point noise near convergence.
let mut decreasing_count = 0;
for w in history.windows(2) {
if w[1].residual_norm < w[0].residual_norm * 1.01 {
decreasing_count += 1;
}
}
let decrease_ratio = decreasing_count as f64 / (history.len() - 1) as f64;
assert!(
decrease_ratio > 0.8,
"expected mostly decreasing residuals, got {:.0}% decreasing",
decrease_ratio * 100.0
);
// Estimate the convergence factor from the last few iterations.
// The cube root averages the per-step contraction over the last 3 steps.
if history.len() >= 4 {
let n_hist = history.len();
let r_late = history[n_hist - 1].residual_norm;
let r_early = history[n_hist - 4].residual_norm;
// Skip the estimate when the residual has already hit floating-point
// noise level, where the ratio would be meaningless.
if r_early > 1e-15 {
let avg_factor = (r_late / r_early).powf(1.0 / 3.0);
assert!(
avg_factor < 1.0,
"convergence factor should be < 1, got {}",
avg_factor
);
}
}
}
// ---------------------------------------------------------------------------
// Verify rejection when spectral radius >= 1
// ---------------------------------------------------------------------------
#[test]
// The Neumann iteration diverges when rho(I - A) >= 1; the solver must
// refuse such systems with a structured error rather than loop or return
// garbage.
fn test_neumann_spectral_radius_check() {
// Build a matrix where off-diagonal entries dominate the diagonal,
// giving spectral radius >= 1. The matrix:
// [1 2]
// [2 1]
// has off-diag sum > diag for each row.
let matrix = CsrMatrix::<f64>::from_coo(
2,
2,
vec![(0, 0, 1.0), (0, 1, 2.0), (1, 0, 2.0), (1, 1, 1.0)],
);
let rhs = vec![1.0, 1.0];
let budget = ComputeBudget::default();
let solver = NeumannSolver::new(1e-8, 100);
let result = solve_via_trait(&solver, &matrix, &rhs, &budget);
match result {
// Preferred outcome: the solver's own spectral-radius estimate
// rejects the system up front and reports which limit was exceeded.
Err(SolverError::SpectralRadiusExceeded {
spectral_radius,
limit,
algorithm,
}) => {
assert!(spectral_radius >= 1.0);
assert_relative_eq!(limit, 1.0, epsilon = 1e-12);
assert_eq!(algorithm, Algorithm::Neumann);
}
Err(SolverError::NumericalInstability { .. }) => {
// Also acceptable: the solver might detect NaN divergence
// before the spectral radius check catches it.
}
// Anything else — including Ok — means the guard failed.
other => panic!(
"expected SpectralRadiusExceeded or NumericalInstability, got {:?}",
other
),
}
}
// ---------------------------------------------------------------------------
// Identity system: Ix = b should converge in 1 iteration
// ---------------------------------------------------------------------------
#[test]
fn test_neumann_identity_system() {
    // For A = I the iterate x_1 = b + (I - A) x_0 already equals b, so the
    // solver should return the rhs itself after a single iteration.
    for n in [1, 5, 20] {
        let matrix = CsrMatrix::<f64>::identity(n);
        let rhs: Vec<f64> = (0..n).map(|i| (i as f64 + 1.0) * 0.3).collect();
        let budget = ComputeBudget::default();
        let solver = NeumannSolver::new(1e-10, 100);
        let result = solve_via_trait(&solver, &matrix, &rhs, &budget).unwrap();
        // The solution must equal the rhs (up to f32 rounding).
        let x = f32_to_f64(&result.solution);
        for (xi, bi) in x.iter().zip(rhs.iter()) {
            assert_relative_eq!(*xi, *bi, epsilon = 1e-5);
        }
        assert_eq!(
            result.iterations, 1,
            "identity system should converge in 1 iteration, got {}",
            result.iterations
        );
        assert_eq!(result.algorithm, Algorithm::Neumann);
    }
}
// ---------------------------------------------------------------------------
// Manually constructed system with known solution
// ---------------------------------------------------------------------------
#[test]
fn test_neumann_known_solution() {
    // System:
    // [3 -1 0] [x0] [2]
    // [-1 3 -1] [x1] = [0]
    // [0 -1 3] [x2] [2]
    //
    // By symmetry x0 = x2; eliminating by hand gives x1 = 2*x0/3 and then
    // 3*x0 - 2*x0/3 = 2, so x0 = x2 = 6/7 and x1 = 4/7.
    let matrix = CsrMatrix::<f64>::from_coo(
        3,
        3,
        vec![
            (0, 0, 3.0),
            (0, 1, -1.0),
            (1, 0, -1.0),
            (1, 1, 3.0),
            (1, 2, -1.0),
            (2, 1, -1.0),
            (2, 2, 3.0),
        ],
    );
    let rhs = vec![2.0, 0.0, 2.0];
    let budget = ComputeBudget::default();
    let solver = NeumannSolver::new(1e-10, 500);
    let result = solve_via_trait(&solver, &matrix, &rhs, &budget).unwrap();
    let x = f32_to_f64(&result.solution);
    let expected = [6.0 / 7.0, 4.0 / 7.0, 6.0 / 7.0];
    for i in 0..3 {
        assert_relative_eq!(x[i], expected[i], epsilon = 1e-4);
    }
}

View File

@@ -0,0 +1,281 @@
//! Integration tests for Forward Push, Backward Push, and mass conservation.
//!
//! Tests cover PPR computation on small graphs, star/complete topologies,
//! mass conservation invariants, and agreement between forward and backward
//! push algorithms.
mod helpers;
use approx::assert_relative_eq;
#[cfg(feature = "backward-push")]
use ruvector_solver::backward_push::BackwardPushSolver;
use ruvector_solver::forward_push::{forward_push_with_residuals, ForwardPushSolver};
#[allow(unused_imports)]
use ruvector_solver::traits::SublinearPageRank;
use ruvector_solver::types::CsrMatrix;
use helpers::adjacency_from_edges;
// ---------------------------------------------------------------------------
// Helper: build common graph topologies
// ---------------------------------------------------------------------------
/// 4-node graph: 0--1--2--3, 0--2 (bidirectional).
fn simple_graph_4() -> CsrMatrix<f64> {
    let edges = [(0, 1), (1, 2), (2, 3), (0, 2)];
    adjacency_from_edges(4, &edges)
}
/// Star graph centred at 0 with k leaves (bidirectional edges).
fn star_graph(k: usize) -> CsrMatrix<f64> {
    // One edge from the hub to each leaf 1..=k.
    let mut edges = Vec::with_capacity(k);
    for leaf in 1..=k {
        edges.push((0, leaf));
    }
    adjacency_from_edges(k + 1, &edges)
}
/// Complete graph on n vertices (bidirectional edges, no self-loops).
fn complete_graph(n: usize) -> CsrMatrix<f64> {
    // Enumerate every unordered pair (i, j) with i < j exactly once;
    // adjacency_from_edges adds both directions.
    let edges: Vec<(usize, usize)> = (0..n)
        .flat_map(|i| ((i + 1)..n).map(move |j| (i, j)))
        .collect();
    adjacency_from_edges(n, &edges)
}
/// Directed cycle: 0->1->2->...->n-1->0.
fn directed_cycle(n: usize) -> CsrMatrix<f64> {
    // One unit-weight edge from each vertex to its successor (mod n).
    let mut entries = Vec::with_capacity(n);
    for v in 0..n {
        entries.push((v, (v + 1) % n, 1.0f64));
    }
    CsrMatrix::<f64>::from_coo(n, n, entries)
}
// ---------------------------------------------------------------------------
// Forward Push: 4-node graph
// ---------------------------------------------------------------------------
#[test]
fn test_forward_push_simple_graph() {
    let graph = simple_graph_4();
    let solver = ForwardPushSolver::new(0.85, 1e-8);
    let result = solver.ppr_from_source(&graph, 0).unwrap();

    // The source vertex must carry the most probability mass.
    assert!(!result.is_empty());
    assert_eq!(result[0].0, 0, "source vertex should be ranked first");
    assert!(result[0].1 > 0.0);

    // Scores must come back sorted in non-increasing order.
    for pair in result.windows(2) {
        assert!(
            pair[0].1 >= pair[1].1,
            "results should be sorted descending: {} < {}",
            pair[0].1,
            pair[1].1
        );
    }

    // Every one of the 4 nodes should receive some probability.
    for v in 0..4 {
        assert!(
            result.iter().any(|(node, _)| *node == v),
            "node {} should appear in PPR results",
            v
        );
    }

    // Scatter the sparse result into a dense vector for positional checks.
    let mut ppr = vec![0.0f64; 4];
    for &(v, score) in &result {
        ppr[v] = score;
    }

    // 0 is connected to 1 and 2; node 3 is only reachable through 2.
    assert!(
        ppr[1] > ppr[3] || ppr[2] > ppr[3],
        "direct neighbours should have higher PPR than distant nodes"
    );
}
// ---------------------------------------------------------------------------
// Forward Push: star graph — center should dominate
// ---------------------------------------------------------------------------
#[test]
fn test_forward_push_star_graph() {
    // 6 nodes: center = 0, leaves = 1..5.
    let graph = star_graph(5);
    let solver = ForwardPushSolver::new(0.85, 1e-8);
    let result = solver.ppr_from_source(&graph, 0).unwrap();

    // PPR from the center: the center itself should rank first.
    assert_eq!(result[0].0, 0);

    // Collect the leaf scores; by symmetry they should all be equal.
    let leaf_scores: Vec<f64> = result
        .iter()
        .filter_map(|&(v, s)| if v == 0 { None } else { Some(s) })
        .collect();
    assert_eq!(leaf_scores.len(), 5);
    let mean = leaf_scores.iter().sum::<f64>() / leaf_scores.len() as f64;
    for &score in &leaf_scores {
        assert_relative_eq!(score, mean, epsilon = 1e-6);
        // The center's PPR must strictly dominate every leaf.
        assert!(result[0].1 > score, "center PPR should exceed leaf PPR");
    }
}
// ---------------------------------------------------------------------------
// Forward Push: complete graph — approximately uniform
// ---------------------------------------------------------------------------
#[test]
fn test_forward_push_complete_graph() {
    let n = 5;
    let graph = complete_graph(n);
    let solver = ForwardPushSolver::new(0.85, 1e-8);
    let result = solver.ppr_from_source(&graph, 0).unwrap();

    // Every vertex of the complete graph must appear in the result.
    assert_eq!(result.len(), n);

    // By symmetry, all non-source vertices should score (almost) the same.
    let others: Vec<f64> = result
        .iter()
        .filter(|&&(v, _)| v != 0)
        .map(|&(_, s)| s)
        .collect();
    let mean = others.iter().sum::<f64>() / others.len() as f64;
    for &score in &others {
        assert_relative_eq!(score, mean, epsilon = 1e-6);
    }
}
// ---------------------------------------------------------------------------
// Forward Push: mass conservation
// ---------------------------------------------------------------------------
#[test]
fn test_forward_push_mass_conservation() {
    // Forward push maintains the invariant sum(p) + sum(r) = 1:
    // probability mass only moves between the estimate vector `p` and the
    // residual vector `r`, it is never created or destroyed.
    // Each case: (graph, push epsilon, comparison tolerance).
    let cases: Vec<(CsrMatrix<f64>, f64, f64)> = vec![
        (simple_graph_4(), 1e-8, 1e-6),
        (star_graph(4), 1e-6, 1e-5),
        (directed_cycle(6), 1e-6, 1e-5),
    ];
    for (graph, eps, tol) in &cases {
        let (p, r) = forward_push_with_residuals(graph, 0, 0.85, *eps).unwrap();
        let total = p.iter().sum::<f64>() + r.iter().sum::<f64>();
        assert_relative_eq!(total, 1.0, epsilon = *tol);
    }
}
// ---------------------------------------------------------------------------
// Backward Push: simple verification
// ---------------------------------------------------------------------------
#[cfg(feature = "backward-push")]
#[test]
fn test_backward_push_simple() {
    // Directed cycle 0->1->2->3->0: every node can reach target 0.
    let graph = directed_cycle(4);
    let solver = BackwardPushSolver::new(0.15, 1e-6);
    let result = solver.ppr_to_target(&graph, 0).unwrap();
    assert!(!result.is_empty());

    // The target itself should carry positive probability mass.
    let target_ppr = result
        .iter()
        .find(|&&(v, _)| v == 0)
        .map(|&(_, p)| p)
        .unwrap_or(0.0);
    assert!(target_ppr > 0.0, "target should have positive PPR");

    // Total mass cannot exceed 1 (modulo floating-point slack).
    let total: f64 = result.iter().map(|&(_, v)| v).sum();
    assert!(
        total <= 1.0 + 1e-6,
        "total PPR should be <= 1, got {}",
        total
    );
}
// ---------------------------------------------------------------------------
// Random walk pairwise: forward and backward push should agree
// ---------------------------------------------------------------------------
#[cfg(feature = "backward-push")]
#[test]
fn test_random_walk_pairwise() {
    // On a symmetric graph, PPR(s -> v) computed by forward push and
    // PPR(v -> s) computed by backward push should approximately agree
    // (up to algorithm variance).
    let graph = complete_graph(5);
    let source = 0;

    // Helper: scatter a sparse (vertex, score) result into a dense vector.
    let densify = |pairs: &[(usize, f64)]| {
        let mut dense = vec![0.0f64; 5];
        for &(v, s) in pairs {
            dense[v] = s;
        }
        dense
    };

    let forward = ForwardPushSolver::new(0.15, 1e-8);
    let fwd_ppr = densify(&forward.ppr_from_source(&graph, source).unwrap());

    let backward = BackwardPushSolver::new(0.15, 1e-8);
    let bwd_ppr = densify(&backward.ppr_to_target(&graph, source).unwrap());

    // Self-PPR (source == target) should match closely in both directions.
    let self_ppr_diff = (fwd_ppr[0] - bwd_ppr[0]).abs();
    assert!(
        self_ppr_diff < 0.1,
        "self-PPR should agree: forward={}, backward={}, diff={}",
        fwd_ppr[0],
        bwd_ppr[0],
        self_ppr_diff
    );

    // By symmetry of the complete graph, each non-source node should get a
    // similar score from both algorithms.
    for v in 1..5 {
        let diff = (fwd_ppr[v] - bwd_ppr[v]).abs();
        assert!(
            diff < 0.1,
            "PPR for node {} should agree: forward={}, backward={}, diff={}",
            v,
            fwd_ppr[v],
            bwd_ppr[v],
            diff
        );
    }
}

View File

@@ -0,0 +1,245 @@
//! Integration tests for the algorithm router and solver orchestrator.
//!
//! Tests cover routing decisions (Neumann for diag-dominant, CG for general
//! SPD, ForwardPush for PageRank), and the fallback chain behaviour.
mod helpers;
use ruvector_solver::router::{RouterConfig, SolverOrchestrator, SolverRouter};
use ruvector_solver::types::{Algorithm, ComputeBudget, CsrMatrix, QueryType, SparsityProfile};
use helpers::{random_diag_dominant_csr, random_spd_csr, random_vector};
// ---------------------------------------------------------------------------
// Helper: default compute budget
// ---------------------------------------------------------------------------
/// A generous default compute budget for routing tests.
///
/// The limits (30 s wall clock, 10k iterations, 1e-8 tolerance) are loose
/// enough that any correctly routed solver should converge well within them.
fn default_budget() -> ComputeBudget {
    ComputeBudget {
        max_time: std::time::Duration::from_secs(30),
        max_iterations: 10_000,
        tolerance: 1e-8,
    }
}
// ---------------------------------------------------------------------------
// Router selects Neumann for diag-dominant + sparse + low spectral radius
// ---------------------------------------------------------------------------
#[test]
fn test_router_selects_neumann_for_diag_dominant() {
    let router = SolverRouter::new(RouterConfig::default());

    // A profile satisfying every Neumann precondition:
    //   - diagonally dominant,
    //   - density under sparsity_sublinear_threshold (0.05),
    //   - spectral radius under neumann_spectral_radius_threshold (0.95).
    let profile = SparsityProfile {
        rows: 1000,
        cols: 1000,
        nnz: 3000,
        density: 0.003,
        is_diag_dominant: true,
        estimated_spectral_radius: 0.5,
        estimated_condition: 10.0,
        is_symmetric_structure: true,
        avg_nnz_per_row: 3.0,
        max_nnz_per_row: 5,
    };
    assert_eq!(
        router.select_algorithm(&profile, &QueryType::LinearSystem),
        Algorithm::Neumann,
        "diag-dominant, sparse, low spectral radius should route to Neumann"
    );

    // Cross-check with a real matrix: analysing a randomly generated
    // diag-dominant matrix should report dominance and a contractive
    // spectral radius estimate.
    let matrix = random_diag_dominant_csr(20, 0.2, 42);
    let real_profile = SolverOrchestrator::analyze_sparsity(&matrix);
    assert!(
        real_profile.is_diag_dominant,
        "random_diag_dominant_csr should produce a diag-dominant matrix"
    );
    assert!(
        real_profile.estimated_spectral_radius < 1.0,
        "spectral radius should be < 1 for diag-dominant matrix"
    );
}
// ---------------------------------------------------------------------------
// Router selects CG for well-conditioned, non-diag-dominant systems
// ---------------------------------------------------------------------------
#[test]
fn test_router_selects_cg_for_general_spd() {
    let router = SolverRouter::new(RouterConfig::default());

    // Not diag-dominant, but well-conditioned (condition < 100) => CG.
    let profile = SparsityProfile {
        rows: 500,
        cols: 500,
        nnz: 25_000,
        density: 0.10,
        is_diag_dominant: false,
        estimated_spectral_radius: 0.8,
        estimated_condition: 50.0,
        is_symmetric_structure: true,
        avg_nnz_per_row: 50.0,
        max_nnz_per_row: 80,
    };
    assert_eq!(
        router.select_algorithm(&profile, &QueryType::LinearSystem),
        Algorithm::CG,
        "well-conditioned, non-diag-dominant should route to CG"
    );

    // Once the condition estimate crosses the threshold, BMSSP takes over.
    let ill_conditioned = SparsityProfile {
        estimated_condition: 500.0,
        ..profile.clone()
    };
    assert_eq!(
        router.select_algorithm(&ill_conditioned, &QueryType::LinearSystem),
        Algorithm::BMSSP,
        "ill-conditioned should route to BMSSP"
    );
}
// ---------------------------------------------------------------------------
// Router selects ForwardPush for PageRank queries
// ---------------------------------------------------------------------------
#[test]
fn test_router_selects_push_for_pagerank() {
    let router = SolverRouter::new(RouterConfig::default());
    // A large, sparse, asymmetric graph profile (5000 nodes).
    let profile = SparsityProfile {
        rows: 5000,
        cols: 5000,
        nnz: 20_000,
        density: 0.0008,
        is_diag_dominant: false,
        estimated_spectral_radius: 0.85,
        estimated_condition: 100.0,
        is_symmetric_structure: false,
        avg_nnz_per_row: 4.0,
        max_nnz_per_row: 50,
    };

    // Single-source PageRank is always handled by ForwardPush.
    assert_eq!(
        router.select_algorithm(&profile, &QueryType::PageRankSingle { source: 0 }),
        Algorithm::ForwardPush,
        "single-source PageRank should route to ForwardPush"
    );

    // Pairwise queries on graphs above push_graph_size_threshold (1000
    // rows) go to HybridRandomWalk instead.
    let pairwise_large = QueryType::PageRankPairwise {
        source: 0,
        target: 100,
    };
    assert_eq!(
        router.select_algorithm(&profile, &pairwise_large),
        Algorithm::HybridRandomWalk,
        "pairwise PageRank on large graph should route to HybridRandomWalk"
    );

    // Pairwise queries on small graphs stay with ForwardPush.
    let small_profile = SparsityProfile {
        rows: 500,
        cols: 500,
        nnz: 2000,
        density: 0.008,
        ..profile.clone()
    };
    let pairwise_small = QueryType::PageRankPairwise {
        source: 0,
        target: 10,
    };
    assert_eq!(
        router.select_algorithm(&small_profile, &pairwise_small),
        Algorithm::ForwardPush,
        "pairwise PageRank on small graph should route to ForwardPush"
    );
}
// ---------------------------------------------------------------------------
// Fallback chain: if first algorithm fails, falls back to CG then Dense
// ---------------------------------------------------------------------------
#[test]
fn test_router_fallback_chain() {
    let orchestrator = SolverOrchestrator::new(RouterConfig::default());
    // Build a well-conditioned SPD system that is solvable.
    // Use a simple diag-dominant tridiagonal so all algorithms can solve it.
    let matrix = CsrMatrix::<f64>::from_coo(
        4,
        4,
        vec![
            (0, 0, 4.0),
            (0, 1, -1.0),
            (1, 0, -1.0),
            (1, 1, 4.0),
            (1, 2, -1.0),
            (2, 1, -1.0),
            (2, 2, 4.0),
            (2, 3, -1.0),
            (3, 2, -1.0),
            (3, 3, 4.0),
        ],
    );
    let rhs = vec![1.0, 0.0, 0.0, 1.0];
    let budget = default_budget();
    // solve_with_fallback should succeed regardless of which algorithm is
    // tried first (the fallback chain will eventually reach CG or Dense).
    let result = orchestrator
        .solve_with_fallback(&matrix, &rhs, QueryType::LinearSystem, &budget)
        .unwrap();
    assert!(
        result.residual_norm < 1e-4,
        "fallback chain should produce a good solution, residual={}",
        result.residual_norm
    );
    // Exercise the routing path the fallback chain starts from. The exact
    // primary algorithm depends on the analysed profile (e.g. Neumann gives
    // the chain [Neumann, CG, Dense]; CG gives [CG, Dense] after dedup), so
    // the selection result is deliberately not asserted — the successful
    // end-to-end solve above already proves the chain works.
    let profile = SolverOrchestrator::analyze_sparsity(&matrix);
    let _selected = orchestrator
        .router()
        .select_algorithm(&profile, &QueryType::LinearSystem);
    assert_eq!(result.solution.len(), 4, "solution should have 4 entries");
    // Test that solve_with_fallback also works on an SPD system that routes
    // to CG. The fallback chain [CG, Dense] should handle it.
    let spd = random_spd_csr(10, 0.3, 42);
    let rhs2 = random_vector(10, 43);
    let result2 = orchestrator
        .solve_with_fallback(&spd, &rhs2, QueryType::LinearSystem, &budget)
        .unwrap();
    assert!(
        result2.residual_norm < 1e-3,
        "fallback on SPD should converge, residual={}",
        result2.residual_norm
    );
}

View File

@@ -0,0 +1,299 @@
//! Integration tests for input validation.
//!
//! Tests cover rejection of NaN and Inf values, dimension mismatches,
//! malformed CSR structures (non-monotonic row_ptrs), and oversized inputs.
use ruvector_solver::error::ValidationError;
use ruvector_solver::types::CsrMatrix;
use ruvector_solver::validation::{
validate_csr_matrix, validate_rhs, validate_solver_input, MAX_NODES,
};
// ---------------------------------------------------------------------------
// Helper: build a valid f32 identity matrix
// ---------------------------------------------------------------------------
fn identity_f32(n: usize) -> CsrMatrix<f32> {
    // An n x n identity in CSR: exactly one entry per row, so
    // row_ptr = [0, 1, ..., n] and col_indices = [0, 1, ..., n-1].
    CsrMatrix {
        row_ptr: (0..=n).collect(),
        col_indices: (0..n).collect(),
        values: vec![1.0f32; n],
        rows: n,
        cols: n,
    }
}
/// Build a small valid 3x3 f32 CSR matrix for testing.
///
/// The matrix is symmetric tridiagonal with 2.0 on the diagonal and -0.5
/// off-diagonal, so it is strictly diagonally dominant and passes all
/// structural validation checks.
fn valid_3x3_f32() -> CsrMatrix<f32> {
    CsrMatrix::<f32>::from_coo(
        3,
        3,
        vec![
            (0, 0, 2.0),
            (0, 1, -0.5),
            (1, 0, -0.5),
            (1, 1, 2.0),
            (1, 2, -0.5),
            (2, 1, -0.5),
            (2, 2, 2.0),
        ],
    )
}
// ---------------------------------------------------------------------------
// Reject NaN in matrix values
// ---------------------------------------------------------------------------
#[test]
fn test_reject_nan_input() {
    // A NaN anywhere in the matrix values must be rejected.
    let mut with_nan = identity_f32(4);
    with_nan.values[2] = f32::NAN;
    let err = validate_csr_matrix(&with_nan).unwrap_err();
    assert!(
        matches!(err, ValidationError::NonFiniteValue(_)),
        "expected NonFiniteValue for NaN in matrix, got {:?}",
        err
    );

    // A NaN in the right-hand side must also be rejected.
    let err_rhs = validate_rhs(&[1.0f32, f32::NAN, 3.0], 3).unwrap_err();
    assert!(
        matches!(err_rhs, ValidationError::NonFiniteValue(_)),
        "expected NonFiniteValue for NaN in RHS, got {:?}",
        err_rhs
    );

    // The combined entry point must catch NaN matrix values too.
    let mut mat2 = valid_3x3_f32();
    mat2.values[0] = f32::NAN;
    let err_combined = validate_solver_input(&mat2, &[1.0, 2.0, 3.0], 1e-6).unwrap_err();
    assert!(
        matches!(err_combined, ValidationError::NonFiniteValue(_)),
        "combined validation should reject NaN matrix values"
    );
}
// ---------------------------------------------------------------------------
// Reject Inf values
// ---------------------------------------------------------------------------
#[test]
fn test_reject_inf_input() {
    // +Inf in matrix values must be rejected.
    let mut with_pos_inf = identity_f32(3);
    with_pos_inf.values[0] = f32::INFINITY;
    let err = validate_csr_matrix(&with_pos_inf).unwrap_err();
    assert!(
        matches!(err, ValidationError::NonFiniteValue(_)),
        "expected NonFiniteValue for +Inf in matrix, got {:?}",
        err
    );

    // -Inf in matrix values must be rejected.
    let mut with_neg_inf = identity_f32(3);
    with_neg_inf.values[1] = f32::NEG_INFINITY;
    let err2 = validate_csr_matrix(&with_neg_inf).unwrap_err();
    assert!(
        matches!(err2, ValidationError::NonFiniteValue(_)),
        "expected NonFiniteValue for -Inf in matrix, got {:?}",
        err2
    );

    // +Inf in the right-hand side.
    let err_rhs = validate_rhs(&[1.0f32, f32::INFINITY, 3.0], 3).unwrap_err();
    assert!(
        matches!(err_rhs, ValidationError::NonFiniteValue(_)),
        "expected NonFiniteValue for Inf in RHS, got {:?}",
        err_rhs
    );

    // -Inf in the right-hand side.
    let err_neg = validate_rhs(&[f32::NEG_INFINITY, 2.0, 3.0], 3).unwrap_err();
    assert!(
        matches!(err_neg, ValidationError::NonFiniteValue(_)),
        "expected NonFiniteValue for -Inf in RHS, got {:?}",
        err_neg
    );
}
// ---------------------------------------------------------------------------
// Reject dimension mismatch: rhs length != matrix rows
// ---------------------------------------------------------------------------
#[test]
fn test_reject_dimension_mismatch() {
    // RHS shorter than the matrix dimension.
    let err_short = validate_rhs(&[1.0f32, 2.0], 3).unwrap_err();
    assert!(
        matches!(err_short, ValidationError::DimensionMismatch(_)),
        "expected DimensionMismatch for rhs length < expected, got {:?}",
        err_short
    );

    // RHS longer than the matrix dimension.
    let err_long = validate_rhs(&[1.0f32, 2.0, 3.0, 4.0], 3).unwrap_err();
    assert!(
        matches!(err_long, ValidationError::DimensionMismatch(_)),
        "expected DimensionMismatch for rhs length > expected, got {:?}",
        err_long
    );

    // The combined entry point must reject a 2-element RHS for a 3x3 matrix.
    let mat = valid_3x3_f32();
    let err_combined = validate_solver_input(&mat, &[1.0f32, 2.0], 1e-6).unwrap_err();
    assert!(
        matches!(err_combined, ValidationError::DimensionMismatch(_)),
        "combined validation should reject dimension mismatch, got {:?}",
        err_combined
    );

    // Non-square matrices are rejected by validate_solver_input.
    let non_square = CsrMatrix::<f32> {
        row_ptr: vec![0, 0, 0],
        col_indices: vec![],
        values: vec![],
        rows: 2,
        cols: 3,
    };
    let err_ns = validate_solver_input(&non_square, &[1.0f32, 2.0], 1e-6).unwrap_err();
    assert!(
        matches!(err_ns, ValidationError::DimensionMismatch(_)),
        "non-square matrix should be rejected, got {:?}",
        err_ns
    );
}
// ---------------------------------------------------------------------------
// Reject invalid CSR: row_ptrs not monotonic
// ---------------------------------------------------------------------------
#[test]
fn test_reject_invalid_csr() {
    // Break monotonicity: row_ptr starts as [0, 1, 2, 3, 4]; forcing
    // row_ptr[2] = 0 makes it decrease.
    let mut non_monotonic = identity_f32(4);
    non_monotonic.row_ptr[2] = 0;
    let err = validate_csr_matrix(&non_monotonic).unwrap_err();
    assert!(
        matches!(err, ValidationError::NonMonotonicRowPtrs { .. }),
        "expected NonMonotonicRowPtrs, got {:?}",
        err
    );
    // The error payload should pinpoint where monotonicity broke.
    if let ValidationError::NonMonotonicRowPtrs { position } = err {
        assert_eq!(
            position, 2,
            "monotonicity violation should be at position 2"
        );
    }

    // A row_ptr that does not start at 0 must be rejected.
    let bad_start = CsrMatrix::<f32> {
        row_ptr: vec![1, 2],
        col_indices: vec![0],
        values: vec![1.0],
        rows: 1,
        cols: 1,
    };
    let err_start = validate_csr_matrix(&bad_start).unwrap_err();
    assert!(
        matches!(err_start, ValidationError::DimensionMismatch(_)),
        "row_ptr[0] != 0 should be rejected, got {:?}",
        err_start
    );

    // row_ptr length must equal rows + 1.
    let bad_len = CsrMatrix::<f32> {
        row_ptr: vec![0, 1], // rows = 3 requires a row_ptr of length 4
        col_indices: vec![0],
        values: vec![1.0],
        rows: 3,
        cols: 3,
    };
    let err_len = validate_csr_matrix(&bad_len).unwrap_err();
    assert!(
        matches!(err_len, ValidationError::DimensionMismatch(_)),
        "wrong row_ptr length should be rejected, got {:?}",
        err_len
    );

    // A column index past `cols` must be rejected.
    let mut bad_col = identity_f32(3);
    bad_col.col_indices[1] = 99; // 3-column matrix: 99 is out of bounds
    let err_col = validate_csr_matrix(&bad_col).unwrap_err();
    assert!(
        matches!(err_col, ValidationError::IndexOutOfBounds { .. }),
        "column index out of bounds should be rejected, got {:?}",
        err_col
    );
}
// ---------------------------------------------------------------------------
// Reject oversized input: exceeds MAX_NODES
// ---------------------------------------------------------------------------
#[test]
fn test_reject_oversized_input() {
    // A matrix with rows > MAX_NODES must be rejected.
    let oversized = CsrMatrix::<f32> {
        // (MAX_NODES + 1) rows need MAX_NODES + 2 row pointers.
        row_ptr: vec![0; MAX_NODES + 2],
        col_indices: vec![],
        values: vec![],
        rows: MAX_NODES + 1,
        cols: 1,
    };
    let err = validate_csr_matrix(&oversized).unwrap_err();
    assert!(
        matches!(err, ValidationError::MatrixTooLarge { .. }),
        "expected MatrixTooLarge for rows > MAX_NODES, got {:?}",
        err
    );
    // The error payload should echo the offending dimensions.
    if let ValidationError::MatrixTooLarge {
        rows,
        cols,
        max_dim,
    } = err
    {
        assert_eq!(rows, MAX_NODES + 1);
        assert_eq!(cols, 1);
        assert_eq!(max_dim, MAX_NODES);
    }

    // A matrix with cols > MAX_NODES must be rejected too.
    let oversized_cols = CsrMatrix::<f32> {
        row_ptr: vec![0, 0],
        col_indices: vec![],
        values: vec![],
        rows: 1,
        cols: MAX_NODES + 1,
    };
    let err_cols = validate_csr_matrix(&oversized_cols).unwrap_err();
    assert!(
        matches!(err_cols, ValidationError::MatrixTooLarge { .. }),
        "expected MatrixTooLarge for cols > MAX_NODES, got {:?}",
        err_cols
    );

    // Boundary note: a matrix of exactly MAX_NODES rows with zero nnz is
    // structurally valid, but allocating its ~10M-entry row_ptr is too slow
    // for a unit test, so the accept-side boundary is not exercised here.
}