Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,658 @@
# Sublinear-Time Solver: DDD Integration Patterns
**Version**: 1.0
**Date**: 2026-02-20
**Status**: Proposed
---
## 1. Anti-Corruption Layers
Anti-Corruption Layers (ACLs) translate between the Solver Core bounded context and each consuming bounded context, preventing domain model leakage.
### 1.1 Solver-to-Coherence ACL
Translates between Prime Radiant's sheaf graph types and the solver's sparse matrix types.
```rust
/// ACL: Coherence Engine ←→ Solver Core
pub struct CoherenceSolverAdapter {
solver: Arc<dyn SparseLaplacianSolver>,
cache: DashMap<u64, SolverResult>, // Keyed on graph version hash
}
impl CoherenceSolverAdapter {
/// Convert SheafGraph to CsrMatrix for solver input
pub fn sheaf_to_csr(graph: &SheafGraph) -> CsrMatrix<f32> {
let n = graph.node_count();
let mut row_ptrs = Vec::with_capacity(n + 1);
let mut col_indices = Vec::new();
let mut values = Vec::new();
row_ptrs.push(0u32);
for node_id in 0..n {
let edges = graph.edges_from(node_id);
let degree: f32 = edges.iter().map(|e| e.weight).sum();
// Laplacian: L = D - A
// Add diagonal (degree)
col_indices.push(node_id as u32);
values.push(degree);
// Add off-diagonal (-weight)
for edge in &edges {
col_indices.push(edge.target as u32);
values.push(-edge.weight);
}
row_ptrs.push(col_indices.len() as u32);
}
CsrMatrix { values: values.into(), col_indices: col_indices.into(), row_ptrs, rows: n, cols: n }
}
/// Convert solver result back to coherence energy
pub fn solution_to_energy(
solution: &SolverResult,
graph: &SheafGraph,
) -> CoherenceEnergy {
// Residual vector r = L*x represents per-edge contradiction
let residual_norm = solution.convergence.final_residual;
// Energy = sum of squared edge residuals
let energy = residual_norm * residual_norm;
// Per-node energy distribution
let node_energies: Vec<f64> = solution.solution.iter()
.map(|&x| (x as f64) * (x as f64))
.collect();
CoherenceEnergy {
global_energy: energy,
node_energies,
solver_algorithm: solution.algorithm_used,
solver_iterations: solution.iterations,
accuracy_bound: solution.error_bounds.relative_error,
}
}
/// Cached solve: reuse result if graph hasn't changed
pub async fn solve_coherence(
&self,
graph: &SheafGraph,
signal: &[f32],
) -> Result<CoherenceEnergy, SolverError> {
let graph_hash = graph.content_hash();
if let Some(cached) = self.cache.get(&graph_hash) {
return Ok(Self::solution_to_energy(&cached, graph));
}
let csr = Self::sheaf_to_csr(graph);
let system = SparseSystem::new(csr, signal.to_vec());
let result = self.solver.solve(&system)?;
self.cache.insert(graph_hash, result.clone());
Ok(Self::solution_to_energy(&result, graph))
}
}
```
### 1.2 Solver-to-GNN ACL
Translates between GNN message passing and sparse system solves.
```rust
/// ACL: GNN ←→ Solver Core
pub struct GnnSolverAdapter {
    solver: Arc<dyn SolverEngine>,
}
impl GnnSolverAdapter {
    /// Sublinear message aggregation using sparse solver
    /// Replaces: O(n × avg_degree) per layer
    /// With: O(nnz × log(1/ε)) per layer
    ///
    /// Solves A·X_col = F_col once per feature dimension, amortizing solver
    /// setup across columns.
    pub fn sublinear_aggregate(
        &self,
        adjacency: &CsrMatrix<f32>,
        features: &[Vec<f32>],
        epsilon: f64,
    ) -> Result<Vec<Vec<f32>>, SolverError> {
        let n = adjacency.rows;
        // Fix: indexing features[0] panicked on an empty feature set; an
        // empty input now aggregates to nothing.
        let feature_dim = match features.first() {
            Some(first) => first.len(),
            None => return Ok(Vec::new()),
        };
        // NOTE(review): `epsilon` is accepted but not yet threaded into the
        // solve — presumably it should set the solver tolerance or budget;
        // confirm against the SolverEngine / ComputeBudget API.
        let _ = epsilon;
        let mut aggregated = vec![vec![0.0f32; feature_dim]; n];
        // Solve A·X_col = F_col for each feature dimension
        // Using batch solver amortization
        for d in 0..feature_dim {
            let rhs: Vec<f32> = features.iter().map(|f| f[d]).collect();
            let system = SparseSystem::new(adjacency.clone(), rhs);
            let result = self.solver.solve_with_budget(
                &system,
                ComputeBudget::for_lane(ComputeLane::Heavy),
            )?;
            for i in 0..n {
                aggregated[i][d] = result.solution[i];
            }
        }
        Ok(aggregated)
    }
}
/// GNN aggregation strategy using solver
pub struct SublinearAggregation {
    adapter: GnnSolverAdapter,
    epsilon: f64,
}
impl AggregationStrategy for SublinearAggregation {
    /// Delegate to the sublinear solver path; on any solver error, degrade
    /// gracefully to plain mean aggregation.
    fn aggregate(
        &self,
        adjacency: &CsrMatrix<f32>,
        features: &[Vec<f32>],
    ) -> Vec<Vec<f32>> {
        let solved = self.adapter.sublinear_aggregate(adjacency, features, self.epsilon);
        match solved {
            Ok(aggregated) => aggregated,
            // Fallback to mean aggregation
            Err(_) => MeanAggregation.aggregate(adjacency, features),
        }
    }
}
```
### 1.3 Solver-to-Graph ACL
Translates between ruvector-graph's property graph model and solver's sparse adjacency.
```rust
/// ACL: Graph Analytics ←→ Solver Core
pub struct GraphSolverAdapter {
    push_solver: Arc<dyn SublinearPageRank>,
}
impl GraphSolverAdapter {
    /// Convert PropertyGraph to SparseAdjacency for solver.
    ///
    /// Edges with no explicit weight default to 1.0 (unweighted edge).
    pub fn property_graph_to_adjacency(graph: &PropertyGraph) -> SparseAdjacency {
        let n = graph.node_count();
        let edges: Vec<(usize, usize, f32)> = graph.edges()
            .map(|e| (e.source, e.target, e.weight.unwrap_or(1.0)))
            .collect();
        SparseAdjacency {
            adj: CsrMatrix::from_edges(&edges, n),
            directed: graph.is_directed(),
            weighted: graph.is_weighted(),
        }
    }
    /// Solver-accelerated PageRank using Forward Push
    /// Replaces: O(n × m × iterations) power iteration
    /// With: O(1/ε) Forward Push
    ///
    /// Returns nodes whose score exceeds ε, ranked by descending score.
    pub fn fast_pagerank(
        &self,
        graph: &PropertyGraph,
        source: usize,
        alpha: f64,
        epsilon: f64,
    ) -> Result<Vec<(usize, f64)>, SolverError> {
        let adj = Self::property_graph_to_adjacency(graph);
        let problem = GraphProblem {
            id: ProblemId::new(),
            graph: adj,
            query: GraphQuery::SingleSource { source },
            parameters: PushParameters { alpha, epsilon, max_iterations: 1_000_000 },
        };
        let result = self.push_solver.solve(&problem)?;
        // Convert solver output to ranked node list
        let mut ranked: Vec<(usize, f64)> = result.solution.iter()
            .enumerate()
            .map(|(i, &score)| (i, score as f64))
            .filter(|(_, score)| *score > epsilon)
            .collect();
        // Fix: partial_cmp().unwrap() panicked if any score was NaN;
        // f64::total_cmp imposes the IEEE 754 total order and never panics.
        ranked.sort_by(|a, b| b.1.total_cmp(&a.1));
        Ok(ranked)
    }
}
```
### 1.4 Platform ACL (WASM / NAPI / REST / MCP)
Serialization boundary between domain types and platform representations.
```rust
/// WASM ACL
#[wasm_bindgen]
pub struct JsSolverConfig {
    inner: SolverConfig,
}
#[wasm_bindgen]
impl JsSolverConfig {
    /// Deserialize a JS object into the domain SolverConfig, surfacing any
    /// serde error back to JavaScript as a string.
    #[wasm_bindgen(constructor)]
    pub fn new(js_config: JsValue) -> Result<JsSolverConfig, JsValue> {
        serde_wasm_bindgen::from_value::<SolverConfig>(js_config)
            .map(|inner| JsSolverConfig { inner })
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
}
/// REST ACL
///
/// Axum handler translating between HTTP request/response types and domain
/// types, so the orchestrator never sees REST representations.
pub async fn solve_handler(
    State(state): State<AppState>,
    Json(request): Json<SolverRequest>,
) -> Result<Json<SolverResponse>, AppError> {
    // Translate REST types to domain types
    let system = SparseSystem::from_request(&request)?;
    // NOTE(review): `budget` is built here but never passed to the
    // orchestrator below — presumably `solve` should take it; confirm the
    // orchestrator API.
    let budget = ComputeBudget::from_request(&request);
    // Execute domain logic
    let result = state.orchestrator.solve(system).await?;
    // Translate domain result to REST response
    Ok(Json(SolverResponse::from_result(&result)))
}
/// MCP ACL
///
/// JSON schema advertised to MCP clients for the `solve_sublinear` tool.
/// Mirrors the CSR wire format (values / col_indices / row_ptrs) plus the
/// right-hand side and optional solver tuning knobs (tolerance, iteration
/// cap, algorithm selection).
pub fn solver_tool_schema() -> McpTool {
    McpTool {
        name: "solve_sublinear".to_string(),
        description: "Solve sparse linear system using sublinear algorithms".to_string(),
        input_schema: json!({
            "type": "object",
            "required": ["matrix_rows", "matrix_cols", "values", "col_indices", "row_ptrs", "rhs"],
            "properties": {
                "matrix_rows": { "type": "integer", "minimum": 1 },
                "matrix_cols": { "type": "integer", "minimum": 1 },
                "values": { "type": "array", "items": { "type": "number" } },
                "col_indices": { "type": "array", "items": { "type": "integer" } },
                "row_ptrs": { "type": "array", "items": { "type": "integer" } },
                "rhs": { "type": "array", "items": { "type": "number" } },
                "tolerance": { "type": "number", "default": 1e-6 },
                "max_iterations": { "type": "integer", "default": 1000 },
                "algorithm": { "type": "string", "enum": ["auto", "neumann", "cg", "true"] },
            }
        }),
    }
}
```
---
## 2. Shared Kernel
Types shared between Solver Core and other bounded contexts.
### 2.1 Sparse Matrix Types
Shared between Solver Core and Min-Cut Context:
```rust
// crates/ruvector-solver/src/shared/sparse.rs
// Also used by ruvector-mincut
pub use crate::domain::values::CsrMatrix;
pub use crate::domain::values::SparsityProfile;
/// Conversion between CsrMatrix and CscMatrix (Compressed Sparse Column)
impl<T: Copy> CsrMatrix<T> {
pub fn to_csc(&self) -> CscMatrix<T> { ... }
pub fn transpose(&self) -> CsrMatrix<T> { ... }
}
```
### 2.2 Error Types
Shared across all solver-related contexts:
```rust
// crates/ruvector-solver/src/shared/errors.rs
/// Shared error type for all solver-related bounded contexts.
#[derive(Debug, thiserror::Error)]
pub enum SolverError {
    /// Iteration budget spent without the residual reaching tolerance.
    #[error("solver did not converge: {iterations} iterations, best residual {best_residual}")]
    NonConvergence { iterations: usize, best_residual: f64, budget: ComputeBudget },
    /// Numerical breakdown (e.g. NaN/Inf) attributed to a pipeline stage.
    #[error("numerical instability in {source}: {detail}")]
    NumericalInstability { source: &'static str, detail: String },
    /// Wall-time / iteration / memory budget ran out mid-solve.
    #[error("compute budget exhausted: {progress:.1}% complete")]
    BudgetExhausted { budget: ComputeBudget, progress: f64 },
    /// Malformed input system; wraps upstream validation failures.
    #[error("invalid input: {0}")]
    InvalidInput(#[from] ValidationError),
    /// Converged, but not to the accuracy the caller requested.
    #[error("precision loss: expected ε={expected_eps}, achieved ε={achieved_eps}")]
    PrecisionLoss { expected_eps: f64, achieved_eps: f64 },
    /// Entire fallback cascade (sublinear → CG → dense) failed.
    #[error("all algorithms failed")]
    AllAlgorithmsFailed,
    /// Opaque error from a numeric backend.
    #[error("backend error: {0}")]
    BackendError(#[from] Box<dyn std::error::Error + Send + Sync>),
}
```
### 2.3 Compute Budget
Shared between Solver and Coherence Gate's compute ladder:
```rust
// Used by both ruvector-solver and cognitum-gate-tilezero
pub use crate::domain::entities::ComputeBudget;
pub use crate::domain::entities::ComputeLane;
```
---
## 3. Published Language
### 3.1 Solver Protocol (JSON Schema)
```json
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://ruvector.io/schemas/solver/v1",
"title": "RuVector Sublinear Solver Protocol v1",
"definitions": {
"SolverRequest": {
"type": "object",
"required": ["system"],
"properties": {
"system": { "$ref": "#/definitions/SparseSystem" },
"config": { "$ref": "#/definitions/SolverConfig" },
"budget": { "$ref": "#/definitions/ComputeBudget" }
}
},
"SparseSystem": {
"type": "object",
"required": ["rows", "cols", "values", "col_indices", "row_ptrs", "rhs"],
"properties": {
"rows": { "type": "integer", "minimum": 1, "maximum": 10000000 },
"cols": { "type": "integer", "minimum": 1, "maximum": 10000000 },
"values": { "type": "array", "items": { "type": "number" } },
"col_indices": { "type": "array", "items": { "type": "integer", "minimum": 0 } },
"row_ptrs": { "type": "array", "items": { "type": "integer", "minimum": 0 } },
"rhs": { "type": "array", "items": { "type": "number" } }
}
},
"SolverResult": {
"type": "object",
"properties": {
"solution": { "type": "array", "items": { "type": "number" } },
"algorithm_used": { "type": "string" },
"iterations": { "type": "integer" },
"residual_norm": { "type": "number" },
"wall_time_us": { "type": "integer" },
"converged": { "type": "boolean" },
"error_bounds": {
"type": "object",
"properties": {
"absolute_error": { "type": "number" },
"relative_error": { "type": "number" }
}
}
}
}
}
}
```
---
## 4. Event-Driven Integration
### 4.1 Event Flow Architecture
```
SolverOrchestrator
emits SolverEvent
┌──────┴──────┐
│ broadcast │
│ ::Sender │
└──────┬──────┘
┌─────┼─────┬──────────┬──────────┐
▼ ▼ ▼ ▼ ▼
Coherence Metrics Stream Audit SONA
Engine Collector API Trail Learning
│ │ │ │ │
▼ ▼ ▼ ▼ ▼
Update Prometheus Server- Witness Update
energy counters Sent chain routing
Events entry weights
```
### 4.2 Coherence Gate as Solver Governor
```
Solve Request
┌────────────────┐
│ Complexity Est.│ "How expensive will this be?"
└───────┬────────┘
┌────────────────┐
│ Gate Decision │ Permit / Defer / Deny
└───┬────┬───┬───┘
│ │ │
Permit Defer Deny
│ │ │
▼ ▼ ▼
Execute Wait Reject
solver for with
human witness
approval
```
### 4.3 SONA Feedback Loop
```
[Solve Request] → [Route] → [Execute] → [Record Result]
▲ │
│ ▼
[Update Routing] [SONA micro-LoRA update]
[Weights] │
▲ │
└─── EWC-protected ──────┘
weight update
```
---
## 5. Dependency Injection
### 5.1 Generic Type Parameters
```rust
/// Solver generic over numeric backend
///
/// Compile-time DI: the backend is a generic parameter (monomorphized,
/// zero-cost) with NalgebraBackend as the default.
pub struct SublinearSolver<B: NumericBackend = NalgebraBackend> {
    backend: B,
    config: SolverConfig,
}
impl<B: NumericBackend> SolverEngine for SublinearSolver<B> {
    type Input = SparseSystem;
    type Output = SolverResult;
    type Error = SolverError;
    /// Solve the sparse system using the injected numeric backend.
    fn solve(&self, input: &Self::Input) -> Result<Self::Output, Self::Error> {
        // Implementation using self.backend for matrix operations
        todo!()
    }
}
```
### 5.2 Runtime DI via Arc<dyn Trait>
```rust
/// Application state with DI
///
/// Runtime dependency injection: trait objects behind Arc allow swapping
/// solver/repository implementations without recompiling callers.
pub struct AppState {
    pub solver: Arc<dyn SolverEngine<Input = SparseSystem, Output = SolverResult, Error = SolverError>>,
    pub router: Arc<AlgorithmRouter>,
    pub session_repo: Arc<dyn SolverSessionRepository>,
    pub event_bus: broadcast::Sender<SolverEvent>, // fan-out to all subscribers
}
```
---
## 6. Integration with Existing Patterns
### 6.1 Core-Binding-Surface Compliance
```
ruvector-solver → Core (pure Rust algorithms)
ruvector-solver-wasm → Binding (wasm-bindgen)
ruvector-solver-node → Binding (NAPI-RS)
@ruvector/solver (npm) → Surface (TypeScript API)
```
### 6.2 Event Sourcing Alignment
SolverEvent matches Prime Radiant's DomainEvent contract:
- `#[serde(tag = "type")]` — Discriminated union in JSON
- Deterministic replay via event log
- Content-addressable via SHAKE-256 hash
- Tamper-detectable in witness chain
### 6.3 Compute Ladder Integration
Solver maps to cognitum-gate-tilezero compute lanes:
| Lane | Solver Use Case | Budget |
|------|----------------|--------|
| Reflex | Cached result lookup | <1ms, 1MB |
| Retrieval | Small solve (n<1K) or Push query | ~10ms, 16MB |
| Heavy | Full CG/Neumann/BMSSP solve | ~100ms, 256MB |
| Deliberate | TRUE with preprocessing, streaming | Unbounded |
---
## 7. Migration Patterns
### 7.1 Strangler Fig for Coherence Engine
Gradual replacement of dense Laplacian computation:
```rust
impl CoherenceComputer {
    /// Strangler-fig migration: try the sublinear solver path for sparse
    /// graphs (behind the `sublinear-coherence` feature flag), falling back
    /// to the existing dense path on failure or for dense graphs.
    pub fn compute_energy(&self, graph: &SheafGraph) -> CoherenceEnergy {
        let density = graph.edge_density();
        #[cfg(feature = "sublinear-coherence")]
        if density < 0.05 {
            // New: Sublinear path for sparse graphs
            // NOTE(review): `signal` is not defined in this snippet —
            // presumably derived from the graph or supplied by the caller;
            // confirm before implementing.
            if let Ok(energy) = self.solver_adapter.solve_coherence(graph, &signal) {
                return energy;
            }
            // Fallthrough to dense on solver failure
        }
        // Existing: Dense path (unchanged)
        self.dense_laplacian_energy(graph)
    }
}
```
Phase 1: Feature flag (opt-in, default off)
Phase 2: Default on for sparse graphs (density < 5%)
Phase 3: Default on for all graphs after benchmark validation
Phase 4: Remove dense path (breaking change in major version)
### 7.2 Branch by Abstraction for GNN
```rust
/// Branch-by-abstraction: the sublinear variant lives behind a feature flag
/// alongside the existing aggregation strategies.
///
/// NOTE(review): section 1.2 models AggregationStrategy as a trait, while
/// here it is an enum — confirm which representation is canonical.
pub enum AggregationStrategy {
    Mean,
    Max,
    Sum,
    Attention,
    #[cfg(feature = "sublinear-gnn")]
    Sublinear { epsilon: f64 },
}
impl GnnLayer {
    /// Dispatch feature aggregation to the configured strategy; the
    /// sublinear arm only exists when the `sublinear-gnn` feature is on.
    pub fn aggregate(&self, adj: &CsrMatrix<f32>, features: &[Vec<f32>]) -> Vec<Vec<f32>> {
        match self.strategy {
            AggregationStrategy::Mean => mean_aggregate(adj, features),
            AggregationStrategy::Max => max_aggregate(adj, features),
            AggregationStrategy::Sum => sum_aggregate(adj, features),
            AggregationStrategy::Attention => attention_aggregate(adj, features),
            #[cfg(feature = "sublinear-gnn")]
            AggregationStrategy::Sublinear { epsilon } => {
                SublinearAggregation::new(epsilon).aggregate(adj, features)
            }
        }
    }
}
```
---
## 8. Cross-Cutting Concerns
### 8.1 Observability
```rust
use tracing::{instrument, info, warn};
impl SolverOrchestrator {
    /// Traced solve entry point: records the routing decision, times the
    /// solve, and logs the outcome (iterations/residual) or the failure.
    /// The `instrument` span carries problem size (n) and nonzero count.
    #[instrument(skip(self, system), fields(n = system.matrix.rows, nnz = system.matrix.nnz()))]
    pub async fn solve(&self, system: SparseSystem) -> Result<SolverResult, SolverError> {
        let algorithm = self.router.select(&system.profile());
        info!(algorithm = ?algorithm, "routing decision");
        let start = Instant::now();
        let result = self.execute(algorithm, &system).await;
        let elapsed = start.elapsed();
        match &result {
            Ok(r) => info!(iterations = r.iterations, residual = r.residual_norm, elapsed_us = elapsed.as_micros() as u64, "solve completed"),
            Err(e) => warn!(error = %e, "solve failed"),
        }
        result
    }
}
```
### 8.2 Caching
```rust
/// TTL- and capacity-bounded cache for solver results.
pub struct SolverCache {
    results: DashMap<u64, (SolverResult, Instant)>,
    ttl: Duration,
    max_entries: usize,
}
impl SolverCache {
    /// Return the cached result for `key` if it is still fresh; otherwise run
    /// `compute`, store its result, and evict the oldest entries once the
    /// cache exceeds capacity.
    pub fn get_or_compute(
        &self,
        key: u64,
        compute: impl FnOnce() -> Result<SolverResult, SolverError>,
    ) -> Result<SolverResult, SolverError> {
        // Fresh hit: clone out while the shard guard is held, then return.
        let fresh_hit = self.results.get(&key).and_then(|entry| {
            let (cached, inserted_at) = &*entry;
            if inserted_at.elapsed() < self.ttl {
                Some(cached.clone())
            } else {
                None // expired — recompute below (insert overwrites it)
            }
        });
        if let Some(hit) = fresh_hit {
            return Ok(hit);
        }
        let computed = compute()?;
        self.results.insert(key, (computed.clone(), Instant::now()));
        // Evict if over capacity
        if self.results.len() > self.max_entries {
            self.evict_oldest();
        }
        Ok(computed)
    }
}
```

View File

@@ -0,0 +1,321 @@
# Sublinear-Time Solver: DDD Strategic Design
**Version**: 1.0
**Date**: 2026-02-20
**Status**: Proposed
---
## 1. Domain Vision Statement
The **Sublinear Solver Domain** provides O(log n) to O(√n) mathematical computation capabilities that transform RuVector's polynomial-time bottlenecks into sublinear-time operations. By replacing dense O(n²-n³) linear algebra with sparse-aware solvers, we enable real-time performance at 100K+ node scales across the coherence engine, GNN, spectral methods, and graph analytics — delivering 10-600x speedups while maintaining configurable accuracy guarantees.
> **Core insight**: The same mathematical object (sparse linear system) appears in coherence computation, GNN message passing, spectral filtering, PageRank, and optimal transport. One solver serves them all.
---
## 2. Bounded Contexts
### 2.1 Solver Core Context
**Responsibility**: Pure mathematical algorithm implementations — Neumann series, Forward/Backward Push, Hybrid Random Walk, TRUE, Conjugate Gradient, BMSSP.
**Ubiquitous Language**:
- *Sparse system*: Ax = b where A has nnz << n² nonzeros
- *Convergence*: Residual norm ||Ax - b|| < ε
- *Neumann iteration*: x = Σ(I-A)^k · b
- *Push operation*: Redistribute probability mass along graph edges
- *Sparsification*: Reduce edge count while preserving spectral properties
- *Condition number*: κ(A) = λ_max / λ_min (drives CG convergence rate)
- *Diagonal dominance*: |a_ii| ≥ Σ|a_ij| for all rows
**Crate**: `ruvector-solver`
**Key Types**:
```rust
// Core domain model
pub struct CsrMatrix<T> { values, col_indices, row_ptrs, rows, cols }
pub struct SolverResult { solution, convergence_info, audit_entry }
pub struct ComputeBudget { max_wall_time, max_iterations, max_memory_bytes, lane }
pub enum Algorithm { Neumann, ForwardPush, BackwardPush, HybridRandomWalk, TRUE, CG, BMSSP }
```
### 2.2 Algorithm Routing Context
**Responsibility**: Selecting the optimal algorithm for each problem based on matrix properties, platform constraints, and learned performance history.
**Ubiquitous Language**:
- *Routing decision*: Map (problem profile) → Algorithm
- *Sparsity threshold*: Density below which sublinear methods outperform dense
- *Crossover point*: Problem size n where algorithm A becomes faster than B
- *Adaptive weight*: SONA-learned routing confidence per algorithm
- *Compute lane*: Reflex (<1ms) / Retrieval (~10ms) / Heavy (~100ms) / Deliberate (unbounded)
**Crate**: `ruvector-solver` (routing module)
### 2.3 Solver Platform Context
**Responsibility**: Platform-specific bindings that translate between domain types and platform-specific representations.
**Ubiquitous Language**:
- *JsSolver*: WASM-bindgen wrapper exposing solver to JavaScript
- *NapiSolver*: NAPI-RS wrapper for Node.js
- *Solver endpoint*: REST route for HTTP-based solving
- *Solver tool*: MCP JSON-RPC tool for AI agent access
**Crates**: `ruvector-solver-wasm`, `ruvector-solver-node`
### 2.4 Consuming Contexts (Existing RuVector Domains)
#### Coherence Context (prime-radiant)
- Consumes: SparseLaplacianSolver trait
- Translates: SheafGraph → CsrMatrix → CoherenceEnergy
- Integration: ACL adapter converts sheaf types to solver types
#### Learning Context (ruvector-gnn, sona)
- Consumes: SolverEngine for sublinear message aggregation
- Translates: Adjacency + Features → Sparse system → Aggregated features
- Integration: SublinearAggregation strategy alongside Mean/Max/Sum
#### Graph Analytics Context (ruvector-graph)
- Consumes: ForwardPush, BackwardPush for PageRank/centrality
- Translates: PropertyGraph → SparseAdjacency → PPR scores
- Integration: Published Language (shared sparse matrix format)
#### Spectral Context (ruvector-math)
- Consumes: Neumann, CG for spectral filtering
- Translates: Filter polynomial → Sparse system → Filtered signal
- Integration: NeumannFilter replaces ChebyshevFilter for rational approximation
#### Attention Context (ruvector-attention)
- Consumes: CG for PDE-based attention diffusion
- Translates: Attention matrix → Sparse Laplacian → Diffused attention
- Integration: PDEAttention mechanism using solver backend
#### Min-Cut Context (ruvector-mincut)
- Consumes: TRUE (shared sparsifier infrastructure)
- Translates: Graph → Sparsified graph → Effective resistances
- Integration: Partnership — co-evolving sparsification code
---
## 3. Context Map
```
┌─────────────────────────────────────────────────────────────────────────┐
│ SUBLINEAR SOLVER UNIVERSE │
│ │
│ ┌──────────────────┐ ┌──────────────────┐ │
│ │ ALGORITHM │ │ SOLVER CORE │ │
│ │ ROUTING │────▶│ │ │
│ │ │ CS │ Neumann, CG, │ │
│ │ Tier1/2/3 select │ │ Push, TRUE, BMSSP │ │
│ └──────────────────┘ └────────┬───────────┘ │
│ │ │
│ ┌──────────┴──────────┐ │
│ │ SOLVER PLATFORM │ │
│ │ │ │
│ │ WASM│NAPI│REST│MCP │ │
│ └──────────┬───────────┘ │
│ │ ACL │
└─────────────────────────────────────┼───────────────────────────────────┘
┌────────────────┼────────────────────┐
│ │ │
┌──────▼──────┐ ┌──────▼──────┐ ┌──────────▼─────┐
│ COHERENCE │ │ LEARNING │ │ GRAPH │
│ (prime-rad.) │ │ (gnn, sona) │ │ ANALYTICS │
│ │ │ │ │ │
│ Conformist │ │ OHS │ │ Published Lang. │
└──────────────┘ └──────────────┘ └──────────────────┘
│ │ │
┌──────▼──────┐ ┌──────▼──────┐ ┌──────────▼─────┐
│ SPECTRAL │ │ ATTENTION │ │ MIN-CUT │
│ (math) │ │ │ │ (mincut) │
│ │ │ │ │ │
│ Shared Kernel│ │ OHS │ │ Partnership │
└──────────────┘ └──────────────┘ └──────────────────┘
```
### Relationship Types
| From | To | Pattern | Description |
|------|-----|---------|-------------|
| Routing → Core | **Customer-Supplier** | Routing decides, Core executes |
| Platform → Core | **Anti-Corruption Layer** | Serialization boundary |
| Core → Coherence | **Conformist** | Solver adapts to coherence's trait interfaces |
| Core → GNN | **Open Host Service** | Solver exposes SolverEngine trait |
| Core → Graph | **Published Language** | Shared CsrMatrix format |
| Core → Spectral | **Shared Kernel** | Common matrix types, error types |
| Core → Min-Cut | **Partnership** | Co-evolving sparsification code |
| Core → Attention | **Open Host Service** | Solver exposes CG backend |
---
## 4. Strategic Classification
| Context | Type | Priority | Competitive Advantage |
|---------|------|----------|----------------------|
| **Solver Core** | Core Domain | P0 | Unique O(log n) solving — no competitor offers this |
| **Algorithm Routing** | Core Domain | P0 | Intelligent auto-selection differentiates from manual tuning |
| **Solver Platform** | Supporting | P1 | Multi-platform deployment (WASM/NAPI/REST/MCP) |
| **Integration Adapters** | Supporting | P1 | Seamless adoption by existing subsystems |
| **Coherence Integration** | Core | P0 | Primary use case: 50-600x coherence speedup |
| **GNN Integration** | Core | P1 | 10-50x message passing speedup |
| **Graph Integration** | Supporting | P1 | O(1/ε) PageRank, new capability |
| **Spectral Integration** | Supporting | P2 | 20-100x spectral filtering |
---
## 5. Subdomains
### 5.1 Core Subdomains (Build In-House)
- **Sparse Linear Algebra**: Neumann, CG, BMSSP implementations optimized for RuVector's workloads
- **Graph Proximity**: Forward/Backward Push, Hybrid Random Walk for PPR computation
- **Dimensionality Reduction**: JL projection and spectral sparsification (TRUE pipeline)
### 5.2 Supporting Subdomains (Build Lean)
- **Numerical Stability**: Regularization, Kahan summation, reorthogonalization, mass invariant monitoring
- **Compute Budget Management**: Resource allocation, deadline enforcement, memory tracking
- **Platform Adaptation**: WASM/NAPI/REST serialization, type conversion, Worker pools
### 5.3 Generic Subdomains (Buy/Reuse)
- **Configuration Management**: Reuse `serde` + feature flags (existing pattern)
- **Logging and Metrics**: Reuse `tracing` ecosystem (existing pattern)
- **Error Handling**: Follow existing `thiserror` pattern
- **Benchmarking**: Reuse Criterion.rs infrastructure
---
## 6. Ubiquitous Language Glossary
### Solver Core Terms
| Term | Definition |
|------|-----------|
| **CsrMatrix** | Compressed Sparse Row format: three arrays (values, col_indices, row_ptrs) representing a sparse matrix |
| **SpMV** | Sparse Matrix-Vector multiply: y = A·x where A is CSR |
| **Neumann Series** | x = Σ_{k=0}^{K} (I-A)^k · b — converges when ρ(I-A) < 1 |
| **Forward Push** | Redistribute positive residual mass to neighbors in graph |
| **PPR** | Personalized PageRank: random-walk-based node relevance |
| **TRUE** | Toolbox for Research on Universal Estimation: JL + sparsify + Neumann |
| **CG** | Conjugate Gradient: iterative Krylov solver for SPD systems |
| **BMSSP** | Bounded Min-Cut Sparse Solver Paradigm: multigrid V-cycle solver |
| **Spectral Radius** | ρ(A) = max eigenvalue magnitude; ρ(I-A) < 1 required for Neumann |
| **Condition Number** | κ(A) = λ_max/λ_min; CG converges in O(√κ) iterations |
| **Diagonal Dominance** | |a_ii| ≥ Σ_{j≠i} |a_ij|; ensures Neumann convergence |
| **Sparsifier** | Reweighted subgraph preserving spectral properties within (1±ε) |
| **JL Projection** | Johnson-Lindenstrauss random projection reducing dimensionality |
### Integration Terms
| Term | Definition |
|------|-----------|
| **Compute Lane** | Execution tier: Reflex (<1ms), Retrieval (~10ms), Heavy (~100ms), Deliberate (unbounded) |
| **Solver Event** | Domain event emitted during/after solve (SolveRequested, IterationCompleted, etc.) |
| **Witness Entry** | SHAKE-256 hash chain entry in audit trail |
| **PermitToken** | Authorization token from MCP coherence gate |
| **Coherence Energy** | Scalar measure of system contradiction from sheaf Laplacian residuals |
| **Fallback Chain** | Ordered algorithm cascade: sublinear → CG → dense |
| **Error Budget** | ε_total decomposed across pipeline stages |
### Platform Terms
| Term | Definition |
|------|-----------|
| **Core-Binding-Surface** | Three-crate pattern: pure Rust core → WASM/NAPI binding → npm surface |
| **JsSolver** | wasm-bindgen struct exposing solver to browser JavaScript |
| **NapiSolver** | NAPI-RS struct exposing solver to Node.js |
| **Worker Pool** | Web Worker collection for browser parallelism |
| **SharedArrayBuffer** | Browser shared memory for zero-copy inter-worker data |
---
## 7. Domain Events (Cross-Context)
| Event | Producer | Consumers | Payload |
|-------|----------|-----------|---------|
| `SolveRequested` | Solver Core | Metrics, Audit | request_id, algorithm, dimensions |
| `SolveConverged` | Solver Core | Coherence, Metrics, Streaming API | request_id, iterations, residual |
| `AlgorithmFallback` | Solver Core | Routing (SONA), Metrics | from_algorithm, to_algorithm, reason |
| `SparsityDetected` | Sparsity Analyzer | Routing | density, recommended_path |
| `BudgetExhausted` | Budget Enforcer | Coherence Gate, Metrics | budget, best_residual |
| `CoherenceUpdated` | Coherence Adapter | Prime Radiant | energy_before, energy_after, solver_used |
| `RoutingDecision` | Algorithm Router | SONA Learning | features, selected_algorithm, latency |
### Event Flow
```
SolverOrchestrator
emits SolverEvent
┌────────┴────────┐
│ broadcast::Sender│
└────────┬────────┘
┌──────┬───────┼───────┬──────────┐
▼ ▼ ▼ ▼ ▼
Coherence Metrics Stream Audit SONA
Engine Collector API Trail Learning
```
---
## 8. Strategic Patterns
### 8.1 Event Sourcing (Aligned with Prime Radiant)
SolverEvent follows the same tagged-enum pattern as Prime Radiant's DomainEvent:
```rust
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum SolverEvent {
SolveRequested { ... },
IterationCompleted { ... },
SolveConverged { ... },
AlgorithmFallback { ... },
BudgetExhausted { ... },
}
```
Enables deterministic replay, tamper detection via content hashes, and forensic analysis.
### 8.2 CQRS for Solver
- **Command side**: `solve(input)` — mutates state, produces events
- **Query side**: `estimate_complexity(input)` — pure function, no side effects
- Separate read/write models enable caching of complexity estimates
### 8.3 Saga for Multi-Phase Solves
TRUE algorithm requires three sequential phases:
1. JL Projection (reduces dimensionality)
2. Spectral Sparsification (reduces edges)
3. Neumann Solve (actual computation)
Each phase is compensatable: if phase 3 fails, the results of phases 1-2 are cached for a retry with a different solver.
```
[JL Projection] ──success──▶ [Sparsification] ──success──▶ [Neumann Solve]
│ │ │
failure failure failure
│ │ │
▼ ▼ ▼
[Log & Abort] [Retry with coarser ε] [Fallback to CG]
```
---
## 9. Evolution Strategy
| Phase | Timeline | Scope | Key Milestone |
|-------|----------|-------|---------------|
| Phase 1 | Weeks 1-2 | Foundation crate + CG + Neumann | First `cargo test` passing |
| Phase 2 | Weeks 3-5 | Push algorithms + routing + coherence integration | Coherence 10x speedup |
| Phase 3 | Weeks 6-8 | TRUE + BMSSP + WASM + NAPI | Full platform coverage |
| Phase 4 | Weeks 9-10 | SONA learning + benchmarks + security hardening | Production readiness |

View File

@@ -0,0 +1,784 @@
# Sublinear-Time Solver: DDD Tactical Design
**Version**: 1.0
**Date**: 2026-02-20
**Status**: Proposed
---
## 1. Aggregate Design
### 1.1 SolverSession Aggregate (Root)
The SolverSession is the primary aggregate root, encapsulating the lifecycle of a solve operation.
```rust
/// Aggregate root for solver operations
///
/// Owns the full lifecycle of a single solve: immutable configuration, the
/// session state machine, and the event-sourced history used for replay
/// and audit.
pub struct SolverSession {
    // Identity
    id: SessionId,
    // Configuration (set at creation, immutable during solve)
    config: SolverConfig,
    budget: ComputeBudget,
    // State (mutated during solve lifecycle)
    state: SessionState,
    current_algorithm: Algorithm,
    // Event sourcing
    history: Vec<SolverEvent>,
    version: u64, // bumped on every state transition
    // Timing
    created_at: Timestamp,
    started_at: Option<Timestamp>,
    completed_at: Option<Timestamp>,
}
/// Session state machine
///
/// Legal transitions are enforced by SolverSession::transition; Cancelled
/// is reachable from any state.
#[derive(Debug, Clone, PartialEq)]
pub enum SessionState {
    /// Created but not yet started
    Idle,
    /// Preprocessing (TRUE: JL, sparsification)
    Preprocessing { phase: PreprocessPhase, progress: f64 },
    /// Active solving
    Solving { iteration: usize, residual: f64 },
    /// Successfully converged
    Converged { result: SolverResult },
    /// Failed with error
    Failed { error: SolverError, best_effort: Option<Vec<f32>> },
    /// Cancelled by user or budget enforcement
    Cancelled { reason: String },
}
impl SolverSession {
    // === Invariants ===
    /// Budget is never exceeded
    ///
    /// Checks wall-clock time (once the session has started) and the
    /// iteration count of an active solve against the session budget.
    fn check_budget(&self) -> Result<(), SolverError> {
        if let Some(started) = self.started_at {
            let elapsed = Timestamp::now() - started;
            if elapsed > self.budget.max_wall_time {
                return Err(SolverError::BudgetExhausted {
                    budget: self.budget.clone(),
                    progress: self.progress(),
                });
            }
        }
        if let SessionState::Solving { iteration, .. } = &self.state {
            if *iteration > self.budget.max_iterations as usize {
                return Err(SolverError::BudgetExhausted {
                    budget: self.budget.clone(),
                    progress: self.progress(),
                });
            }
        }
        Ok(())
    }
    /// State transitions are valid
    ///
    /// Enforces the session state machine; every accepted transition bumps
    /// the aggregate version for event-sourcing consistency.
    fn transition(&mut self, new_state: SessionState) -> Result<(), SolverError> {
        let valid = match (&self.state, &new_state) {
            (SessionState::Idle, SessionState::Preprocessing { .. }) => true,
            (SessionState::Idle, SessionState::Solving { .. }) => true,
            (SessionState::Preprocessing { .. }, SessionState::Solving { .. }) => true,
            (SessionState::Solving { .. }, SessionState::Solving { .. }) => true,
            (SessionState::Solving { .. }, SessionState::Converged { .. }) => true,
            (SessionState::Solving { .. }, SessionState::Failed { .. }) => true,
            (_, SessionState::Cancelled { .. }) => true, // Always cancellable
            _ => false,
        };
        if !valid {
            return Err(SolverError::InvalidStateTransition {
                from: format!("{:?}", self.state),
                to: format!("{:?}", new_state),
            });
        }
        self.state = new_state;
        self.version += 1;
        Ok(())
    }
    // === Commands ===
    /// Begin solving: stamp the start time, record the SolveRequested event,
    /// and move to the Solving state.
    pub fn start_solve(&mut self, system: &SparseSystem) -> Result<(), SolverError> {
        self.check_budget()?;
        self.started_at = Some(Timestamp::now());
        self.history.push(SolverEvent::SolveRequested {
            request_id: self.id,
            algorithm: self.current_algorithm,
            input_dimensions: (system.matrix.rows, system.matrix.cols, system.matrix.nnz()),
            timestamp: Timestamp::now(),
        });
        self.transition(SessionState::Solving { iteration: 0, residual: f64::INFINITY })
    }
    /// Record one solver iteration; transitions to Converged once the
    /// residual drops below the configured tolerance.
    ///
    /// NOTE(review): the cross-context events table lists a SolveConverged
    /// event, but only IterationCompleted is pushed here — confirm whether
    /// convergence should also append SolveConverged to the history.
    pub fn record_iteration(&mut self, iteration: usize, residual: f64) -> Result<(), SolverError> {
        self.check_budget()?;
        self.history.push(SolverEvent::IterationCompleted {
            request_id: self.id,
            iteration,
            residual_norm: residual,
            wall_time_us: self.elapsed_us(),
            timestamp: Timestamp::now(),
        });
        if residual < self.config.tolerance {
            self.transition(SessionState::Converged {
                result: SolverResult {
                    iterations: iteration,
                    final_residual: residual,
                    ..Default::default()
                },
            })
        } else {
            self.transition(SessionState::Solving { iteration, residual })
        }
    }
    /// On failure, fall back to the next algorithm in the cascade.
    /// Returns the retry algorithm, or None once the cascade is exhausted
    /// (in which case the session enters Failed).
    pub fn fail_and_fallback(&mut self, error: SolverError) -> Option<Algorithm> {
        let fallback = self.next_fallback();
        self.history.push(SolverEvent::AlgorithmFallback {
            request_id: self.id,
            from_algorithm: self.current_algorithm,
            to_algorithm: fallback,
            reason: error.to_string(),
            timestamp: Timestamp::now(),
        });
        if let Some(next) = fallback {
            self.current_algorithm = next;
            // Solving → Idle is deliberately outside transition()'s legal set
            // (it exists only for retry), so reset directly — but still bump
            // the version: every state change must advance the aggregate
            // version, or event-sourced replay drifts out of sync.
            self.state = SessionState::Idle; // Reset for retry
            self.version += 1;
            Some(next)
        } else {
            let _ = self.transition(SessionState::Failed {
                error,
                best_effort: None,
            });
            None
        }
    }
    /// Fallback cascade: every sublinear algorithm falls back to CG, CG
    /// falls back to dense direct, and dense has no further fallback.
    fn next_fallback(&self) -> Option<Algorithm> {
        match self.current_algorithm {
            Algorithm::Neumann | Algorithm::ForwardPush | Algorithm::BackwardPush |
            Algorithm::HybridRandomWalk | Algorithm::TRUE | Algorithm::BMSSP
                => Some(Algorithm::CG),
            Algorithm::CG => Some(Algorithm::DenseDirect),
            Algorithm::DenseDirect => None, // No further fallback
        }
    }
}
```
### 1.2 SparseSystem Aggregate
```rust
/// Immutable representation of a sparse linear system Ax = b
pub struct SparseSystem {
    id: SystemId,             // Identity of this system
    matrix: CsrMatrix<f32>,   // Coefficient matrix A (CSR layout)
    rhs: Vec<f32>,            // Right-hand side b; length must equal matrix.rows
    metadata: SystemMetadata, // Structural properties used for routing
}
/// Structural properties of a system, consumed by the algorithm router.
pub struct SystemMetadata {
    pub sparsity: SparsityProfile,       // Nonzero-structure statistics
    pub is_spd: bool,                    // Symmetric positive definite
    pub is_laplacian: bool,              // Graph-Laplacian structure
    pub condition_estimate: Option<f64>, // Condition number estimate, when known
    pub source_context: SourceContext,   // Which context produced the system
}
/// Origin of a sparse system — which consuming context produced it.
pub enum SourceContext {
    CoherenceLaplacian { graph_id: String },
    GnnAdjacency { layer: usize, node_count: usize },
    GraphAnalytics { query_type: String },
    SpectralFilter { filter_degree: usize },
    UserProvided,
}
impl SparseSystem {
    // === Invariants ===

    /// Validate the structural invariants of the system `Ax = b`:
    /// dimensions agree, every stored value is finite, and the matrix
    /// has at least one nonzero.
    pub fn validate(&self) -> Result<(), ValidationError> {
        // Matrix dimensions match RHS
        if self.matrix.rows != self.rhs.len() {
            return Err(ValidationError::DimensionMismatch {
                expected: self.matrix.rows,
                actual: self.rhs.len(),
            });
        }
        // All values finite — report the actual offending index
        // (previously hard-coded to 0, losing the diagnostic).
        if let Some((i, _)) = self.matrix.values.iter().enumerate().find(|(_, v)| !v.is_finite()) {
            return Err(ValidationError::InvalidNumber {
                field: "matrix_values", index: i, reason: "non-finite",
            });
        }
        if let Some((i, _)) = self.rhs.iter().enumerate().find(|(_, v)| !v.is_finite()) {
            return Err(ValidationError::InvalidNumber {
                field: "rhs", index: i, reason: "non-finite",
            });
        }
        // Sparsity > 0
        if self.matrix.nnz() == 0 {
            return Err(ValidationError::EmptyMatrix);
        }
        Ok(())
    }
}
```
### 1.3 GraphProblem Aggregate
```rust
/// Graph-based problem for Push algorithms and random walks
pub struct GraphProblem {
    id: ProblemId,              // Identity of this problem
    graph: SparseAdjacency,     // Underlying graph
    query: GraphQuery,          // What is being asked of the graph
    parameters: PushParameters, // Tuning knobs for push/walk execution
}
/// Graph adjacency structure in CSR form.
pub struct SparseAdjacency {
    pub adj: CsrMatrix<f32>, // Adjacency matrix
    pub directed: bool,      // Edges are one-way when true
    pub weighted: bool,      // Edge weights are meaningful when true
}
/// Which node(s) a graph query concerns.
pub enum GraphQuery {
    SingleSource { source: usize },
    SingleTarget { target: usize },
    Pairwise { source: usize, target: usize },
    BatchSources { sources: Vec<usize> },
    AllNodes,
}
/// Tuning parameters for push-style algorithms.
pub struct PushParameters {
    pub alpha: f64, // Damping factor (default: 0.85)
    pub epsilon: f64, // Push threshold
    pub max_iterations: u64, // Safety bound
}
```
---
## 2. Entity Design
### 2.1 SolverResult Entity
```rust
/// Outcome of a completed solve, including provenance for auditing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SolverResult {
    pub id: ResultId,                  // Identity of this result
    pub session_id: SessionId,         // Session that produced it
    pub algorithm_used: Algorithm,     // Algorithm that actually ran (after fallbacks)
    pub solution: Vec<f32>,            // Approximate solution vector x
    pub iterations: usize,             // Iterations executed
    pub residual_norm: f64,            // ||A*x - b|| at termination
    pub wall_time_us: u64,             // Total wall time, microseconds
    pub convergence: ConvergenceInfo,  // Full convergence trace
    pub error_bounds: ErrorBounds,     // A-posteriori accuracy bounds
    pub audit_entry: SolverAuditEntry, // Audit/provenance record
}
```
### 2.2 ComputeBudget Entity
```rust
/// Hard resource limits for one solve attempt.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComputeBudget {
    pub max_wall_time: Duration, // Wall-clock cap
    pub max_iterations: u64,     // Iteration cap
    pub max_memory_bytes: usize, // Peak memory cap
    pub lane: ComputeLane,       // Lane this budget was derived for
}
/// Latency lane a request is served in; budgets scale with the lane.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum ComputeLane {
    Reflex, // < 1ms — cached results, trivial problems
    Retrieval, // ~ 10ms — simple solves (small n, well-conditioned)
    Heavy, // ~ 100ms — full solver pipeline
    Deliberate, // unbounded — streaming progress, complex problems
}
impl ComputeBudget {
    /// Default budget for a compute lane: wall-time, iteration, and memory
    /// caps scale up together from Reflex to Deliberate.
    pub fn for_lane(lane: ComputeLane) -> Self {
        // (wall-time cap, iteration cap, memory cap) per lane.
        let (max_wall_time, max_iterations, max_memory_bytes) = match lane {
            ComputeLane::Reflex => (Duration::from_millis(1), 10, 1 << 20), // 1MB
            ComputeLane::Retrieval => (Duration::from_millis(10), 100, 16 << 20), // 16MB
            ComputeLane::Heavy => (Duration::from_millis(100), 10_000, 256 << 20), // 256MB
            ComputeLane::Deliberate => (Duration::from_secs(300), 1_000_000, 2 << 30), // 2GB
        };
        Self { max_wall_time, max_iterations, max_memory_bytes, lane }
    }
}
```
### 2.3 AlgorithmProfile Entity
```rust
/// Static capability sheet for one algorithm, consumed by the router.
#[derive(Debug, Clone)]
pub struct AlgorithmProfile {
    pub algorithm: Algorithm,
    pub complexity_class: ComplexityClass,
    pub sparsity_range: (f64, f64), // (min_density, max_density)
    pub size_range: (usize, usize), // (min_n, max_n)
    pub deterministic: bool,   // Same input always yields same output
    pub parallelizable: bool,  // Can exploit multiple threads
    pub wasm_compatible: bool, // Runs in a WASM host
    pub numerical_stability: Stability,
    pub convergence_guarantee: ConvergenceGuarantee,
}
/// Asymptotic cost class in the problem size n.
pub enum ComplexityClass {
    Logarithmic, // O(log n)
    SquareRoot, // O(√n)
    NearLinear, // O(n · polylog(n))
    Linear, // O(n)
    Quadratic, // O(n²)
}
/// What an algorithm promises about convergence.
pub enum ConvergenceGuarantee {
    Guaranteed { max_iterations: usize },      // Always converges within the bound
    Probabilistic { confidence: f64 },         // Converges with the given confidence
    Conditional { requirement: &'static str }, // Converges only when the requirement holds
}
```
---
## 3. Value Objects
### 3.1 CsrMatrix<T>
```rust
/// Immutable value object — equality by content
///
/// Compressed Sparse Row layout: row `i`'s entries occupy
/// `values[row_ptrs[i]..row_ptrs[i + 1]]` with matching `col_indices`.
#[derive(Clone)]
pub struct CsrMatrix<T: Copy> {
    pub values: AlignedVec<T>,        // Stored (nonzero) values, row-major
    pub col_indices: AlignedVec<u32>, // Column of each stored value
    pub row_ptrs: Vec<u32>,           // rows + 1 offsets into values/col_indices
    pub rows: usize,
    pub cols: usize,
}
impl<T: Copy> CsrMatrix<T> {
    /// Number of stored (nonzero) entries.
    pub fn nnz(&self) -> usize { self.values.len() }

    /// Fraction of entries that are stored: nnz / (rows * cols).
    /// Returns 0.0 for a degenerate 0-dimension matrix instead of
    /// dividing by zero (which would yield NaN).
    pub fn density(&self) -> f64 {
        let total = self.rows * self.cols;
        if total == 0 { 0.0 } else { self.nnz() as f64 / total as f64 }
    }

    /// Heap footprint of the three CSR arrays, in bytes.
    pub fn memory_bytes(&self) -> usize {
        self.values.len() * size_of::<T>()
            + self.col_indices.len() * size_of::<u32>()
            + self.row_ptrs.len() * size_of::<u32>()
    }
}
```
### 3.2 ConvergenceInfo
```rust
/// Per-solve convergence trace.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceInfo {
    pub converged: bool,            // True when the tolerance was reached
    pub iterations: usize,          // Iterations actually executed
    pub residual_history: Vec<f64>, // Residual norm per iteration
    pub final_residual: f64,        // Residual norm at termination
    pub convergence_rate: f64, // ratio of consecutive residuals
}
```
### 3.3 SparsityProfile
```rust
/// Structural statistics of a sparse matrix, used for routing decisions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SparsityProfile {
    pub nonzero_count: usize,  // nnz
    pub total_elements: usize, // rows * cols
    pub density: f64,          // nnz / total_elements
    pub diagonal_dominance: f64, // fraction of rows that are diag. dominant
    pub bandwidth: usize, // max |i - j| for nonzero a_ij
    pub symmetry: f64, // fraction of entries with a_ij == a_ji
    pub avg_row_nnz: f64,   // mean nonzeros per row
    pub max_row_nnz: usize, // nonzero count of the densest row
}
```
### 3.4 ComplexityEstimate
```rust
/// Pre-solve cost prediction used for routing and lane selection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComplexityEstimate {
    pub estimated_flops: u64,             // Predicted floating-point operations
    pub estimated_memory_bytes: u64,      // Predicted peak memory
    pub estimated_wall_time_us: u64,      // Predicted wall time, microseconds
    pub recommended_algorithm: Algorithm, // Router's preferred algorithm
    pub recommended_lane: ComputeLane,    // Budget lane matching the estimate
    pub confidence: f64,                  // Confidence in this estimate
}
```
### 3.5 ErrorBounds
```rust
/// A-posteriori accuracy bounds attached to a solver result.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorBounds {
    pub absolute_error: f64, // ||x_approx - x_exact||
    pub relative_error: f64, // ||x_approx - x_exact|| / ||x_exact||
    pub residual_norm: f64, // ||A*x_approx - b||
    pub confidence: f64, // Statistical confidence (for randomized algorithms)
}
```
---
## 4. Domain Events
```rust
/// Domain events emitted over a solve's lifecycle.
///
/// Serialized with an internal "type" tag so consumers can dispatch on the
/// variant name.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum SolverEvent {
    /// A solve was accepted for execution.
    SolveRequested {
        request_id: SessionId,
        algorithm: Algorithm,
        input_dimensions: (usize, usize, usize), // rows, cols, nnz
        timestamp: Timestamp,
    },
    /// One solver iteration finished.
    IterationCompleted {
        request_id: SessionId,
        iteration: usize,
        residual_norm: f64,
        wall_time_us: u64,
        timestamp: Timestamp,
    },
    /// The residual dropped below tolerance.
    SolveConverged {
        request_id: SessionId,
        total_iterations: usize,
        final_residual: f64,
        total_wall_time_us: u64,
        accuracy: ErrorBounds,
        timestamp: Timestamp,
    },
    /// The solve ended without converging.
    SolveFailed {
        request_id: SessionId,
        error: String,
        best_residual: f64,
        iterations_completed: usize,
        timestamp: Timestamp,
    },
    /// An algorithm failed and the session is moving down the fallback
    /// chain; `to_algorithm` is `None` when the chain is exhausted.
    AlgorithmFallback {
        request_id: SessionId,
        from_algorithm: Algorithm,
        to_algorithm: Option<Algorithm>,
        reason: String,
        timestamp: Timestamp,
    },
    /// A compute-budget limit stopped the solve.
    BudgetExhausted {
        request_id: SessionId,
        budget: ComputeBudget,
        best_residual: f64,
        timestamp: Timestamp,
    },
    /// Pre-solve cost estimate produced for routing.
    ComplexityEstimated {
        request_id: SessionId,
        estimate: ComplexityEstimate,
        timestamp: Timestamp,
    },
    /// Sparsity analysis completed for a system.
    SparsityDetected {
        system_id: SystemId,
        profile: SparsityProfile,
        recommended_path: Algorithm,
        timestamp: Timestamp,
    },
    /// A numeric-health issue was observed but did not abort the solve.
    NumericalWarning {
        request_id: SessionId,
        warning_type: NumericalWarningType,
        detail: String,
        timestamp: Timestamp,
    },
}
/// Categories of numeric-health warnings carried by `NumericalWarning`.
pub enum NumericalWarningType {
    NearSingular,
    SlowConvergence,
    OrthogonalityLoss,
    MassInvariantViolation,
    PrecisionLoss,
}
```
---
## 5. Domain Services
### 5.1 SolverOrchestrator
```rust
/// Orchestrates: routing → validation → execution → fallback → result
///
/// NOTE(design doc): illustrative pseudocode — the `{ .. }` event payloads
/// and `execute_algorithm` are elided and this will not compile as written.
pub struct SolverOrchestrator {
    router: AlgorithmRouter,                            // Algorithm selection
    solvers: HashMap<Algorithm, Box<dyn SolverEngine>>, // One engine per algorithm
    budget_enforcer: BudgetEnforcer,                    // Enforces ComputeBudget limits
    event_bus: broadcast::Sender<SolverEvent>,          // Domain-event broadcast
}
impl SolverOrchestrator {
    /// Full solve pipeline: analyze → route → estimate → execute, retrying
    /// down the session's fallback chain until success or exhaustion.
    pub async fn solve(&self, system: SparseSystem) -> Result<SolverResult, SolverError> {
        // 1. Analyze sparsity
        let profile = system.metadata.sparsity.clone();
        self.event_bus.send(SolverEvent::SparsityDetected { .. });
        // 2. Route to optimal algorithm
        let algorithm = self.router.select(&ProblemProfile::from(&system));
        let estimate = self.estimate_complexity(&system);
        self.event_bus.send(SolverEvent::ComplexityEstimated { .. });
        // 3. Create session
        let mut session = SolverSession::new(algorithm, estimate.recommended_lane);
        // 4. Execute with fallback chain
        loop {
            match self.execute_algorithm(&mut session, &system).await {
                Ok(result) => return Ok(result),
                Err(e) => {
                    // Session decides whether another algorithm remains.
                    match session.fail_and_fallback(e) {
                        Some(_next) => continue, // Retry with fallback
                        None => return Err(SolverError::AllAlgorithmsFailed),
                    }
                }
            }
        }
    }
}
```
### 5.2 SparsityAnalyzer
```rust
/// Analyzes matrix properties for routing decisions
pub struct SparsityAnalyzer;
impl SparsityAnalyzer {
    /// Build the routing profile for a CSR matrix from its cheap accessors
    /// plus the structural measurement helpers.
    pub fn analyze(matrix: &CsrMatrix<f32>) -> SparsityProfile {
        let nnz = matrix.nnz();
        let row_count = matrix.rows;
        SparsityProfile {
            nonzero_count: nnz,
            total_elements: row_count * matrix.cols,
            density: matrix.density(),
            diagonal_dominance: Self::measure_diagonal_dominance(matrix),
            bandwidth: Self::estimate_bandwidth(matrix),
            symmetry: Self::measure_symmetry(matrix),
            avg_row_nnz: nnz as f64 / row_count as f64,
            max_row_nnz: Self::max_row_nnz(matrix),
        }
    }
}
```
### 5.3 ConvergenceMonitor
```rust
/// Monitors convergence and triggers fallback
pub struct ConvergenceMonitor {
    stagnation_window: usize, // Look back N iterations (stagnation check applies when >= 1)
    stagnation_threshold: f64, // Improvement < threshold → stagnant
    divergence_factor: f64, // Residual growth > factor → diverging
}
impl ConvergenceMonitor {
    /// Classify the residual history as progressing, stagnant, or diverging.
    ///
    /// With fewer than two samples there is nothing to compare, so the
    /// monitor reports `Progressing`.
    pub fn check(&self, history: &[f64]) -> ConvergenceStatus {
        if history.len() < 2 {
            return ConvergenceStatus::Progressing;
        }
        let latest = *history.last().unwrap();
        let previous = history[history.len() - 2];
        // Divergence check: residual grew by more than the allowed factor.
        if latest > previous * self.divergence_factor {
            return ConvergenceStatus::Diverging;
        }
        // Stagnation check. Guard window >= 1: a zero window would index
        // history[len] and panic. Guard window_start > 0.0: a zero reference
        // residual would make the relative improvement NaN.
        if self.stagnation_window >= 1 && history.len() >= self.stagnation_window {
            let window_start = history[history.len() - self.stagnation_window];
            if window_start > 0.0 {
                let improvement = (window_start - latest) / window_start;
                if improvement < self.stagnation_threshold {
                    return ConvergenceStatus::Stagnant;
                }
            }
        }
        ConvergenceStatus::Progressing
    }
}
```
---
## 6. Repositories
### 6.1 SolverSessionRepository
```rust
/// Persistence boundary for `SolverSession` aggregates.
pub trait SolverSessionRepository: Send + Sync {
    /// Persist (insert or overwrite) a session.
    fn save(&self, session: &SolverSession) -> Result<(), RepositoryError>;
    /// Look up a session by id; `Ok(None)` when absent.
    fn find_by_id(&self, id: &SessionId) -> Result<Option<SolverSession>, RepositoryError>;
    /// Sessions currently considered active.
    fn find_active(&self) -> Result<Vec<SolverSession>, RepositoryError>;
    /// Remove a session by id.
    fn delete(&self, id: &SessionId) -> Result<(), RepositoryError>;
}
/// In-memory implementation (server, WASM)
pub struct InMemorySessionRepo {
    sessions: DashMap<SessionId, SolverSession>, // Concurrent map keyed by session id
}
```
---
## 7. Factories
### 7.1 SolverFactory
```rust
/// Creates solver engines from an algorithm choice plus configuration.
pub struct SolverFactory;
impl SolverFactory {
    /// Instantiate the engine for `algorithm`, boxed behind the common
    /// `SolverEngine` trait. The match is exhaustive over `Algorithm`, so
    /// adding a variant forces a compile error here.
    pub fn create(algorithm: Algorithm, config: &SolverConfig) -> Box<dyn SolverEngine> {
        match algorithm {
            Algorithm::Neumann => Box::new(NeumannSolver::from_config(config)),
            Algorithm::ForwardPush => Box::new(ForwardPushSolver::from_config(config)),
            Algorithm::BackwardPush => Box::new(BackwardPushSolver::from_config(config)),
            Algorithm::HybridRandomWalk => Box::new(HybridRandomWalkSolver::from_config(config)),
            Algorithm::TRUE => Box::new(TrueSolver::from_config(config)),
            Algorithm::CG => Box::new(ConjugateGradientSolver::from_config(config)),
            Algorithm::BMSSP => Box::new(BmsspSolver::from_config(config)),
            Algorithm::DenseDirect => Box::new(DenseDirectSolver::from_config(config)),
        }
    }
}
```
### 7.2 SparseSystemFactory
```rust
/// Builds `SparseSystem`s from various source representations.
///
/// NOTE(design doc): signature sketch only — `{ ... }` bodies are elided.
pub struct SparseSystemFactory;
impl SparseSystemFactory {
    // Presumably extracts one HNSW layer's adjacency — confirm with impl.
    pub fn from_hnsw(hnsw: &HnswIndex, level: usize) -> SparseSystem { ... }
    // Builds from a weighted edge list over n nodes.
    pub fn from_adjacency_list(edges: &[(usize, usize, f32)], n: usize) -> SparseSystem { ... }
    // Presumably sparsifies a dense matrix using `threshold` — confirm with impl.
    pub fn from_dense(matrix: &[Vec<f32>], threshold: f32) -> SparseSystem { ... }
    // Graph Laplacian L = D - A from an adjacency structure.
    pub fn laplacian_from_graph(graph: &SparseAdjacency) -> SparseSystem { ... }
}
```
---
## 8. Module Structure
```
crates/ruvector-solver/src/
├── lib.rs # Public API surface
├── domain/
│ ├── mod.rs
│ ├── aggregates/
│ │ ├── session.rs # SolverSession aggregate
│ │ ├── sparse_system.rs # SparseSystem aggregate
│ │ └── graph_problem.rs # GraphProblem aggregate
│ ├── entities/
│ │ ├── result.rs # SolverResult entity
│ │ ├── budget.rs # ComputeBudget entity
│ │ └── profile.rs # AlgorithmProfile entity
│ ├── values/
│ │ ├── csr_matrix.rs # CsrMatrix<T> value object
│ │ ├── convergence.rs # ConvergenceInfo value object
│ │ ├── sparsity.rs # SparsityProfile value object
│ │ └── estimate.rs # ComplexityEstimate value object
│ └── events.rs # SolverEvent enum
├── services/
│ ├── orchestrator.rs # SolverOrchestrator
│ ├── sparsity_analyzer.rs # SparsityAnalyzer
│ ├── convergence_monitor.rs # ConvergenceMonitor
│ └── budget_enforcer.rs # BudgetEnforcer
├── algorithms/
│ ├── neumann.rs
│ ├── forward_push.rs
│ ├── backward_push.rs
│ ├── hybrid_random_walk.rs
│ ├── true_solver.rs
│ ├── conjugate_gradient.rs
│ ├── bmssp.rs
│ └── dense_direct.rs
├── routing/
│ ├── router.rs # AlgorithmRouter
│ ├── heuristic.rs # Tier 2 rules
│ └── adaptive.rs # Tier 3 SONA
├── infrastructure/
│ ├── arena.rs # Arena allocator integration
│ ├── simd.rs # SIMD dispatch
│ ├── repository.rs # Session repository
│ └── factory.rs # SolverFactory, SparseSystemFactory
└── traits.rs # SolverEngine, NumericBackend, etc.
```
---
## 9. State Machine
```
┌─────────┐
│ IDLE │
└────┬────┘
│ start_solve()
┌────▼────┐
┌─────│PREPROC. │──────┐
│ └────┬────┘ │
│ │ done │ cancel
│ ┌────▼────┐ │
│ │ SOLVING │◀────┤ (back to SOLVING on retry)
│ └──┬──┬───┘ │
│ │ │ │
│ converge fail │
│ │ │ │
│ ┌────▼┐ ┌▼────┐ │
│ │CONV.│ │FAIL │ │
│ └─────┘ └──┬──┘ │
│ │ │
│ fallback? │
│ Y N │
│ │ │ │
│ ┌────▼┐ │ ┌──▼──────┐
└────▶│IDLE │ └─▶│CANCELLED│
└─────┘ └─────────┘
```