Squashed 'vendor/ruvector/' content from commit b64c2172
git-subtree-dir: vendor/ruvector git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
38
crates/ruvector-graph-transformer/Cargo.toml
Normal file
38
crates/ruvector-graph-transformer/Cargo.toml
Normal file
@@ -0,0 +1,38 @@
|
||||
[package]
|
||||
name = "ruvector-graph-transformer"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
authors.workspace = true
|
||||
repository.workspace = true
|
||||
description = "Unified graph transformer with proof-gated mutation substrate — 8 verified modules for physics, biological, manifold, temporal, and economic graph intelligence"
|
||||
readme = "README.md"
|
||||
keywords = ["graph-transformer", "proof-gated", "attention", "verified", "neural-network"]
|
||||
categories = ["science", "mathematics", "algorithms"]
|
||||
|
||||
[features]
|
||||
default = ["sublinear", "verified-training"]
|
||||
sublinear = []
|
||||
physics = []
|
||||
biological = []
|
||||
self-organizing = []
|
||||
verified-training = []
|
||||
manifold = []
|
||||
temporal = []
|
||||
economic = []
|
||||
full = ["sublinear", "physics", "biological", "self-organizing", "verified-training", "manifold", "temporal", "economic"]
|
||||
|
||||
[dependencies]
|
||||
ruvector-verified = { version = "0.1", path = "../ruvector-verified", features = ["ultra", "hnsw-proofs"] }
|
||||
ruvector-gnn = { version = "2.0", path = "../ruvector-gnn" }
|
||||
ruvector-attention = { version = "2.0", path = "../ruvector-attention" }
|
||||
ruvector-mincut = { version = "2.0", path = "../ruvector-mincut" }
|
||||
ruvector-solver = { version = "2.0", path = "../ruvector-solver" }
|
||||
ruvector-coherence = { version = "2.0", path = "../ruvector-coherence" }
|
||||
serde = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
proptest = { workspace = true }
|
||||
267
crates/ruvector-graph-transformer/README.md
Normal file
267
crates/ruvector-graph-transformer/README.md
Normal file
@@ -0,0 +1,267 @@
|
||||
# ruvector-graph-transformer
|
||||
|
||||
[](https://crates.io/crates/ruvector-graph-transformer)
|
||||
[](https://docs.rs/ruvector-graph-transformer)
|
||||
[](LICENSE)
|
||||
[]()
|
||||
|
||||
**A graph neural network where every operation is mathematically proven correct before it runs.**
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
ruvector-graph-transformer = "2.0"
|
||||
```
|
||||
|
||||
Most graph neural networks let you modify data freely -- add nodes, change weights, update edges -- with no safety guarantees. If a bug corrupts your graph, you find out later (or never). This crate takes a different approach: every mutation to graph state requires a formal proof that the operation is valid. No proof, no access. Think of it like a lock on every piece of data that can only be opened with the right mathematical key.
|
||||
|
||||
On top of that safety layer, 8 specialized modules bring cutting-edge graph intelligence: attention that scales to millions of nodes without checking every pair, physics simulations that conserve energy by construction, neurons that only fire when they should, training that automatically rolls back bad gradient steps, and geometry that works in curved spaces instead of assuming everything is flat.
|
||||
|
||||
The result is a graph transformer you can trust: if it produces an answer, that answer was computed correctly. Part of the [RuVector](https://github.com/ruvnet/ruvector) ecosystem.
|
||||
|
||||
| | Standard GNN | ruvector-graph-transformer |
|
||||
|---|---|---|
|
||||
| **Mutation safety** | Unchecked -- bugs corrupt silently | Proof-gated: no mutation without formal witness |
|
||||
| **Attention complexity** | O(n^2) -- slows down on large graphs | O(n log n) sublinear via LSH/PPR/spectral |
|
||||
| **Training guarantees** | Hope for the best | Verified: certificates, delta-apply rollback, fail-closed |
|
||||
| **Geometry** | Euclidean only -- assumes flat space | Product manifolds S^n x H^m x R^k |
|
||||
| **Causality** | No enforcement | Temporal masking + Granger causality extraction |
|
||||
| **Incentive alignment** | Not considered | Nash equilibrium + Shapley attribution |
|
||||
| **Platforms** | Python only | Rust + WASM + Node.js (NAPI-RS) |
|
||||
|
||||
## Key Features
|
||||
|
||||
| Feature | What It Does | Why It Matters |
|
||||
|---------|-------------|----------------|
|
||||
| **Proof-Gated Mutation** | Every write to graph state requires a formal proof | Bugs cannot silently corrupt your data -- invalid operations are rejected before they happen |
|
||||
| **Sublinear Attention** | LSH-bucketed, PPR-sampled, and spectral sparsification | Scales to millions of nodes where O(n^2) attention would be unusable |
|
||||
| **Verified Training** | Delta-apply rollback with BLAKE3-hashed certificates | Bad gradient steps are automatically rolled back; every training step is auditable |
|
||||
| **Physics-Informed** | Hamiltonian dynamics, gauge equivariant message passing | Simulations conserve energy by construction -- no drift over long runs |
|
||||
| **Biological** | Spiking attention, Hebbian/STDP learning, dendritic branching | Model brain-like dynamics where neurons only fire when they should |
|
||||
| **Self-Organizing** | Morphogenetic fields, developmental programs, graph coarsening | Graphs that grow, adapt, and restructure themselves |
|
||||
| **Manifold Geometry** | Product manifolds S^n x H^m x R^k, Riemannian Adam | Work in curved spaces where Euclidean assumptions break down |
|
||||
| **Temporal-Causal** | Causal masking, continuous-time ODE, Granger causality | Enforce cause-before-effect and extract causal relationships from time series |
|
||||
| **Economic** | Nash equilibrium attention, Shapley attribution | Align incentives in multi-agent graphs and attribute value fairly |
|
||||
|
||||
## Modules
|
||||
|
||||
8 feature-gated modules, each backed by an Architecture Decision Record:
|
||||
|
||||
| Module | Feature Flag | ADR | What It Does |
|
||||
|--------|-------------|-----|--------------|
|
||||
| **Proof-Gated Mutation** | always on | [ADR-047](../../docs/adr/ADR-047-proof-gated-mutation-protocol.md) | `ProofGate<T>`, `MutationLedger`, `ProofScope`, `EpochBoundary` |
|
||||
| **Sublinear Attention** | `sublinear` | [ADR-048](../../docs/adr/ADR-048-sublinear-graph-attention.md) | LSH-bucket, PPR-sampled, spectral sparsification |
|
||||
| **Physics-Informed** | `physics` | [ADR-051](../../docs/adr/ADR-051-physics-informed-graph-layers.md) | Hamiltonian dynamics, gauge equivariant MP, Lagrangian attention, conservative PDE |
|
||||
| **Biological** | `biological` | [ADR-052](../../docs/adr/ADR-052-biological-graph-layers.md) | Spiking attention, Hebbian/STDP learning, dendritic branching, inhibition strategies |
|
||||
| **Self-Organizing** | `self-organizing` | -- | Morphogenetic fields, developmental programs, graph coarsening |
|
||||
| **Verified Training** | `verified-training` | [ADR-049](../../docs/adr/ADR-049-verified-training-pipeline.md) | Training certificates, delta-apply rollback, LossStabilityBound, EnergyGate |
|
||||
| **Manifold** | `manifold` | [ADR-055](../../docs/adr/ADR-055-manifold-graph-layers.md) | Product manifolds, Riemannian Adam, geodesic MP, Lie group equivariance |
|
||||
| **Temporal-Causal** | `temporal` | [ADR-053](../../docs/adr/ADR-053-temporal-causal-graph-layers.md) | Causal masking, retrocausal attention, continuous-time ODE, Granger causality |
|
||||
| **Economic** | `economic` | [ADR-054](../../docs/adr/ADR-054-economic-graph-layers.md) | Nash equilibrium attention, Shapley attribution, incentive-aligned MPNN |
|
||||
|
||||
## Quick Start
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
ruvector-graph-transformer = "2.0"
|
||||
|
||||
# Or with all modules:
|
||||
ruvector-graph-transformer = { version = "2.0", features = ["full"] }
|
||||
```
|
||||
|
||||
### Proof-Gated Mutation
|
||||
|
||||
Every mutation to graph state passes through a proof gate:
|
||||
|
||||
```rust
|
||||
use ruvector_graph_transformer::{ProofGate, GraphTransformer, GraphTransformerConfig};
|
||||
use ruvector_verified::ProofEnvironment;
|
||||
|
||||
// Create a proof environment and graph transformer
|
||||
let mut env = ProofEnvironment::new();
|
||||
let gt = GraphTransformer::with_defaults();
|
||||
|
||||
// Gate a value behind a proof
|
||||
let gate: ProofGate<Vec<f32>> = gt.create_gate(vec![1.0; 128]);
|
||||
|
||||
// Mutation requires proof -- no proof, no access
|
||||
let proof_id = ruvector_verified::prove_dim_eq(&mut env, 128, 128).unwrap();
|
||||
let mutated = gate.mutate_with_proof(&env, proof_id, |v| {
|
||||
v.iter_mut().for_each(|x| *x *= 2.0);
|
||||
}).unwrap();
|
||||
```
|
||||
|
||||
### Sublinear Attention
|
||||
|
||||
```rust
|
||||
use ruvector_graph_transformer::sublinear_attention::SublinearGraphAttention;
|
||||
use ruvector_graph_transformer::config::SublinearConfig;
|
||||
|
||||
let config = SublinearConfig {
|
||||
lsh_buckets: 16,
|
||||
ppr_samples: 8,
|
||||
sparsification_factor: 0.5,
|
||||
};
|
||||
let attn = SublinearGraphAttention::new(128, config);
|
||||
|
||||
// O(n log n) instead of O(n^2)
|
||||
let features = vec![vec![0.5f32; 128]; 1000];
|
||||
let outputs = attn.lsh_attention(&features).unwrap();
|
||||
```
|
||||
|
||||
### Verified Training
|
||||
|
||||
```rust
|
||||
use ruvector_graph_transformer::verified_training::{VerifiedTrainer, TrainingInvariant};
|
||||
use ruvector_graph_transformer::config::VerifiedTrainingConfig;
|
||||
|
||||
let config = VerifiedTrainingConfig {
|
||||
fail_closed: true, // reject step if any invariant fails
|
||||
..Default::default()
|
||||
};
|
||||
let mut trainer = VerifiedTrainer::new(
|
||||
config,
|
||||
vec![
|
||||
TrainingInvariant::LossStabilityBound { window: 10, max_deviation: 0.1 },
|
||||
TrainingInvariant::WeightNormBound { max_norm: 100.0 },
|
||||
],
|
||||
);
|
||||
|
||||
// Delta-apply: gradients go to scratch buffer, commit only if invariants pass
|
||||
let result = trainer.step(&weights, &gradients, lr).unwrap();
|
||||
assert!(result.certificate.is_some()); // BLAKE3-hashed training certificate
|
||||
```
|
||||
|
||||
### Physics-Informed Layers
|
||||
|
||||
```rust
|
||||
use ruvector_graph_transformer::physics::HamiltonianGraphNet;
|
||||
use ruvector_graph_transformer::config::PhysicsConfig;
|
||||
|
||||
let config = PhysicsConfig::default();
|
||||
let mut hgn = HamiltonianGraphNet::new(config);
|
||||
|
||||
// Symplectic leapfrog preserves energy
|
||||
let (new_q, new_p) = hgn.step(&positions, &momenta, &edges, dt);
|
||||
assert!(hgn.energy_conserved(1e-6)); // formal conservation proof
|
||||
```
|
||||
|
||||
### Manifold Operations
|
||||
|
||||
```rust
|
||||
use ruvector_graph_transformer::manifold::{ProductManifoldAttention, ManifoldType};
|
||||
use ruvector_graph_transformer::config::ManifoldConfig;
|
||||
|
||||
let config = ManifoldConfig {
|
||||
spherical_dim: 64,
|
||||
hyperbolic_dim: 32,
|
||||
euclidean_dim: 32,
|
||||
curvature: -1.0,
|
||||
};
|
||||
let attn = ProductManifoldAttention::new(config);
|
||||
|
||||
// Attention in S^64 x H^32 x R^32
|
||||
let outputs = attn.forward(&features, &edges).unwrap();
|
||||
```
|
||||
|
||||
## Feature Flags
|
||||
|
||||
```toml
|
||||
[features]
|
||||
default = ["sublinear", "verified-training"]
|
||||
full = ["sublinear", "physics", "biological", "self-organizing",
|
||||
"verified-training", "manifold", "temporal", "economic"]
|
||||
```
|
||||
|
||||
| Flag | Default | Adds |
|
||||
|------|---------|------|
|
||||
| `sublinear` | yes | LSH, PPR, spectral attention |
|
||||
| `verified-training` | yes | Training certificates, delta-apply rollback |
|
||||
| `physics` | no | Hamiltonian, gauge, Lagrangian, PDE layers |
|
||||
| `biological` | no | Spiking, Hebbian, STDP, dendritic layers |
|
||||
| `self-organizing` | no | Morphogenetic fields, developmental programs |
|
||||
| `manifold` | no | Product manifolds, Riemannian Adam, Lie groups |
|
||||
| `temporal` | no | Causal masking, Granger causality, ODE |
|
||||
| `economic` | no | Nash equilibrium, Shapley, incentive-aligned MPNN |
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
ruvector-graph-transformer
|
||||
├── proof_gated.rs <- ProofGate<T>, MutationLedger, attestation chains
|
||||
├── sublinear_attention.rs <- O(n log n) attention via LSH/PPR/spectral
|
||||
├── physics.rs <- Energy-conserving Hamiltonian/Lagrangian dynamics
|
||||
├── biological.rs <- Spiking networks, Hebbian plasticity, STDP
|
||||
├── self_organizing.rs <- Morphogenetic fields, reaction-diffusion growth
|
||||
├── verified_training.rs <- Certified training with delta-apply rollback
|
||||
├── manifold.rs <- Product manifold S^n x H^m x R^k geometry
|
||||
├── temporal.rs <- Causal masking, Granger causality, ODE integration
|
||||
├── economic.rs <- Nash equilibrium, Shapley values, mechanism design
|
||||
├── config.rs <- Per-module configuration with sensible defaults
|
||||
├── error.rs <- Unified error composing 4 sub-crate errors
|
||||
└── lib.rs <- Unified entry point with feature-gated re-exports
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
```
|
||||
ruvector-graph-transformer
|
||||
├── ruvector-verified <- formal proofs, attestations, gated routing
|
||||
├── ruvector-gnn <- base GNN message passing
|
||||
├── ruvector-attention <- scaled dot-product attention
|
||||
├── ruvector-mincut <- graph structure operations
|
||||
├── ruvector-solver <- sparse linear systems
|
||||
└── ruvector-coherence <- coherence measurement
|
||||
```
|
||||
|
||||
## Bindings
|
||||
|
||||
| Platform | Package | Install |
|
||||
|----------|---------|---------|
|
||||
| **WASM** | [`ruvector-graph-transformer-wasm`](../ruvector-graph-transformer-wasm) | `wasm-pack build` |
|
||||
| **Node.js** | [`ruvector-graph-transformer-node`](../ruvector-graph-transformer-node) | `npm install @ruvector/graph-transformer` |
|
||||
|
||||
## Tests
|
||||
|
||||
```bash
|
||||
# Default features (sublinear + verified-training)
|
||||
cargo test -p ruvector-graph-transformer
|
||||
|
||||
# All modules
|
||||
cargo test -p ruvector-graph-transformer --features full
|
||||
|
||||
# Individual module
|
||||
cargo test -p ruvector-graph-transformer --features physics
|
||||
```
|
||||
|
||||
**163 unit tests + 23 integration tests = 186 total**, all passing.
|
||||
|
||||
## ADR Documentation
|
||||
|
||||
| ADR | Title |
|
||||
|-----|-------|
|
||||
| [ADR-046](../../docs/adr/ADR-046-graph-transformer-architecture.md) | Unified Graph Transformer Architecture |
|
||||
| [ADR-047](../../docs/adr/ADR-047-proof-gated-mutation-protocol.md) | Proof-Gated Mutation Protocol |
|
||||
| [ADR-048](../../docs/adr/ADR-048-sublinear-graph-attention.md) | Sublinear Graph Attention |
|
||||
| [ADR-049](../../docs/adr/ADR-049-verified-training-pipeline.md) | Verified Training Pipeline |
|
||||
| [ADR-050](../../docs/adr/ADR-050-graph-transformer-bindings.md) | WASM + Node.js Bindings |
|
||||
| [ADR-051](../../docs/adr/ADR-051-physics-informed-graph-layers.md) | Physics-Informed Graph Layers |
|
||||
| [ADR-052](../../docs/adr/ADR-052-biological-graph-layers.md) | Biological Graph Layers |
|
||||
| [ADR-053](../../docs/adr/ADR-053-temporal-causal-graph-layers.md) | Temporal-Causal Graph Layers |
|
||||
| [ADR-054](../../docs/adr/ADR-054-economic-graph-layers.md) | Economic Graph Layers |
|
||||
| [ADR-055](../../docs/adr/ADR-055-manifold-graph-layers.md) | Manifold Graph Layers |
|
||||
|
||||
## License
|
||||
|
||||
**MIT License** - see [LICENSE](../../LICENSE) for details.
|
||||
|
||||
---
|
||||
|
||||
<div align="center">
|
||||
|
||||
**Part of [RuVector](https://github.com/ruvnet/ruvector) - Built by [rUv](https://ruv.io)**
|
||||
|
||||
[](https://github.com/ruvnet/ruvector)
|
||||
|
||||
[Documentation](https://docs.rs/ruvector-graph-transformer) | [Crates.io](https://crates.io/crates/ruvector-graph-transformer) | [GitHub](https://github.com/ruvnet/ruvector)
|
||||
|
||||
</div>
|
||||
1670
crates/ruvector-graph-transformer/src/biological.rs
Normal file
1670
crates/ruvector-graph-transformer/src/biological.rs
Normal file
File diff suppressed because it is too large
Load Diff
287
crates/ruvector-graph-transformer/src/config.rs
Normal file
287
crates/ruvector-graph-transformer/src/config.rs
Normal file
@@ -0,0 +1,287 @@
|
||||
//! Configuration types for all graph transformer modules.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Top-level configuration for the graph transformer.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GraphTransformerConfig {
|
||||
/// Embedding dimension for node features.
|
||||
pub embed_dim: usize,
|
||||
/// Number of attention heads.
|
||||
pub num_heads: usize,
|
||||
/// Dropout rate (0.0 to 1.0).
|
||||
pub dropout: f32,
|
||||
/// Whether to enable proof gating on all mutations.
|
||||
pub proof_gated: bool,
|
||||
/// Sublinear attention configuration.
|
||||
#[cfg(feature = "sublinear")]
|
||||
pub sublinear: SublinearConfig,
|
||||
/// Physics module configuration.
|
||||
#[cfg(feature = "physics")]
|
||||
pub physics: PhysicsConfig,
|
||||
/// Biological module configuration.
|
||||
#[cfg(feature = "biological")]
|
||||
pub biological: BiologicalConfig,
|
||||
/// Self-organizing module configuration.
|
||||
#[cfg(feature = "self-organizing")]
|
||||
pub self_organizing: SelfOrganizingConfig,
|
||||
/// Verified training configuration.
|
||||
#[cfg(feature = "verified-training")]
|
||||
pub verified_training: VerifiedTrainingConfig,
|
||||
/// Manifold module configuration.
|
||||
#[cfg(feature = "manifold")]
|
||||
pub manifold: ManifoldConfig,
|
||||
/// Temporal module configuration.
|
||||
#[cfg(feature = "temporal")]
|
||||
pub temporal: TemporalConfig,
|
||||
/// Economic module configuration.
|
||||
#[cfg(feature = "economic")]
|
||||
pub economic: EconomicConfig,
|
||||
}
|
||||
|
||||
impl Default for GraphTransformerConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
embed_dim: 64,
|
||||
num_heads: 4,
|
||||
dropout: 0.1,
|
||||
proof_gated: true,
|
||||
#[cfg(feature = "sublinear")]
|
||||
sublinear: SublinearConfig::default(),
|
||||
#[cfg(feature = "physics")]
|
||||
physics: PhysicsConfig::default(),
|
||||
#[cfg(feature = "biological")]
|
||||
biological: BiologicalConfig::default(),
|
||||
#[cfg(feature = "self-organizing")]
|
||||
self_organizing: SelfOrganizingConfig::default(),
|
||||
#[cfg(feature = "verified-training")]
|
||||
verified_training: VerifiedTrainingConfig::default(),
|
||||
#[cfg(feature = "manifold")]
|
||||
manifold: ManifoldConfig::default(),
|
||||
#[cfg(feature = "temporal")]
|
||||
temporal: TemporalConfig::default(),
|
||||
#[cfg(feature = "economic")]
|
||||
economic: EconomicConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for sublinear attention.
|
||||
#[cfg(feature = "sublinear")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SublinearConfig {
|
||||
/// Number of LSH buckets for locality-sensitive hashing.
|
||||
pub lsh_buckets: usize,
|
||||
/// Number of PPR random walk samples.
|
||||
pub ppr_samples: usize,
|
||||
/// Spectral sparsification factor (0.0 to 1.0).
|
||||
pub sparsification_factor: f32,
|
||||
}
|
||||
|
||||
#[cfg(feature = "sublinear")]
|
||||
impl Default for SublinearConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lsh_buckets: 16,
|
||||
ppr_samples: 32,
|
||||
sparsification_factor: 0.5,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for Hamiltonian graph networks.
|
||||
#[cfg(feature = "physics")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct PhysicsConfig {
|
||||
/// Time step for symplectic integration.
|
||||
pub dt: f32,
|
||||
/// Number of leapfrog steps per update.
|
||||
pub leapfrog_steps: usize,
|
||||
/// Energy conservation tolerance.
|
||||
pub energy_tolerance: f32,
|
||||
}
|
||||
|
||||
#[cfg(feature = "physics")]
|
||||
impl Default for PhysicsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
dt: 0.01,
|
||||
leapfrog_steps: 10,
|
||||
energy_tolerance: 1e-4,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for biological graph attention.
|
||||
#[cfg(feature = "biological")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct BiologicalConfig {
|
||||
/// Membrane time constant for LIF neurons.
|
||||
pub tau_membrane: f32,
|
||||
/// Spike threshold voltage.
|
||||
pub threshold: f32,
|
||||
/// STDP learning rate.
|
||||
pub stdp_rate: f32,
|
||||
/// Maximum weight bound for stability proofs.
|
||||
pub max_weight: f32,
|
||||
}
|
||||
|
||||
#[cfg(feature = "biological")]
|
||||
impl Default for BiologicalConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
tau_membrane: 20.0,
|
||||
threshold: 1.0,
|
||||
stdp_rate: 0.01,
|
||||
max_weight: 5.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for self-organizing modules.
|
||||
#[cfg(feature = "self-organizing")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SelfOrganizingConfig {
|
||||
/// Diffusion rate for morphogenetic fields.
|
||||
pub diffusion_rate: f32,
|
||||
/// Reaction rate for Turing patterns.
|
||||
pub reaction_rate: f32,
|
||||
/// Maximum growth steps in developmental programs.
|
||||
pub max_growth_steps: usize,
|
||||
/// Coherence threshold for topology maintenance.
|
||||
pub coherence_threshold: f32,
|
||||
}
|
||||
|
||||
#[cfg(feature = "self-organizing")]
|
||||
impl Default for SelfOrganizingConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
diffusion_rate: 0.1,
|
||||
reaction_rate: 0.05,
|
||||
max_growth_steps: 100,
|
||||
coherence_threshold: 0.8,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for verified training (ADR-049 hardened).
|
||||
#[cfg(feature = "verified-training")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct VerifiedTrainingConfig {
|
||||
/// Maximum Lipschitz constant for weight updates.
|
||||
pub lipschitz_bound: f32,
|
||||
/// Whether to verify loss monotonicity at each step (legacy; prefer LossStabilityBound).
|
||||
pub verify_monotonicity: bool,
|
||||
/// Learning rate for training.
|
||||
pub learning_rate: f32,
|
||||
/// Whether the trainer operates in fail-closed mode (default: true).
|
||||
/// When true, invariant violations reject the step and discard the delta.
|
||||
/// When false, violations are logged but the step proceeds (degraded mode).
|
||||
pub fail_closed: bool,
|
||||
/// Warmup steps during which invariant violations are logged but
|
||||
/// do not trigger rollback. After warmup, the fail_closed policy applies.
|
||||
pub warmup_steps: u64,
|
||||
/// Optional dataset manifest hash for certificate binding.
|
||||
pub dataset_manifest_hash: Option<[u8; 32]>,
|
||||
/// Optional code/build hash for certificate binding.
|
||||
pub code_build_hash: Option<[u8; 32]>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "verified-training")]
|
||||
impl Default for VerifiedTrainingConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lipschitz_bound: 10.0,
|
||||
verify_monotonicity: true,
|
||||
learning_rate: 0.001,
|
||||
fail_closed: true,
|
||||
warmup_steps: 0,
|
||||
dataset_manifest_hash: None,
|
||||
code_build_hash: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for product manifold attention.
|
||||
#[cfg(feature = "manifold")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ManifoldConfig {
|
||||
/// Dimension of the spherical component S^n.
|
||||
pub spherical_dim: usize,
|
||||
/// Dimension of the hyperbolic component H^m.
|
||||
pub hyperbolic_dim: usize,
|
||||
/// Dimension of the Euclidean component R^k.
|
||||
pub euclidean_dim: usize,
|
||||
/// Curvature for the hyperbolic component (negative).
|
||||
pub curvature: f32,
|
||||
}
|
||||
|
||||
#[cfg(feature = "manifold")]
|
||||
impl Default for ManifoldConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
spherical_dim: 16,
|
||||
hyperbolic_dim: 16,
|
||||
euclidean_dim: 16,
|
||||
curvature: -1.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for causal temporal attention.
|
||||
#[cfg(feature = "temporal")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TemporalConfig {
|
||||
/// Decay rate for temporal attention weights.
|
||||
pub decay_rate: f32,
|
||||
/// Maximum time lag for causal masking.
|
||||
pub max_lag: usize,
|
||||
/// Number of Granger causality lags to test.
|
||||
pub granger_lags: usize,
|
||||
}
|
||||
|
||||
#[cfg(feature = "temporal")]
|
||||
impl Default for TemporalConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
decay_rate: 0.9,
|
||||
max_lag: 10,
|
||||
granger_lags: 5,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for economic graph attention mechanisms.
|
||||
#[cfg(feature = "economic")]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EconomicConfig {
|
||||
/// Utility weight for game-theoretic attention.
|
||||
pub utility_weight: f32,
|
||||
/// Temperature for softmax in Nash equilibrium computation.
|
||||
pub temperature: f32,
|
||||
/// Convergence threshold for iterated best response.
|
||||
pub convergence_threshold: f32,
|
||||
/// Maximum iterations for Nash equilibrium.
|
||||
pub max_iterations: usize,
|
||||
/// Minimum stake for incentive-aligned MPNN.
|
||||
pub min_stake: f32,
|
||||
/// Fraction of stake slashed on misbehavior.
|
||||
pub slash_fraction: f32,
|
||||
/// Number of permutation samples for Shapley value estimation.
|
||||
pub num_permutations: usize,
|
||||
}
|
||||
|
||||
#[cfg(feature = "economic")]
|
||||
impl Default for EconomicConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
utility_weight: 1.0,
|
||||
temperature: 1.0,
|
||||
convergence_threshold: 0.01,
|
||||
max_iterations: 100,
|
||||
min_stake: 1.0,
|
||||
slash_fraction: 0.1,
|
||||
num_permutations: 100,
|
||||
}
|
||||
}
|
||||
}
|
||||
864
crates/ruvector-graph-transformer/src/economic.rs
Normal file
864
crates/ruvector-graph-transformer/src/economic.rs
Normal file
@@ -0,0 +1,864 @@
|
||||
//! Economic graph attention mechanisms.
|
||||
//!
|
||||
//! Implements game-theoretic and mechanism-design approaches to graph
|
||||
//! attention, including Nash equilibrium attention, Shapley value
|
||||
//! attribution, and incentive-aligned message passing.
|
||||
|
||||
#[cfg(feature = "economic")]
|
||||
use ruvector_verified::{
|
||||
gated::{route_proof, ProofKind, TierDecision},
|
||||
proof_store::create_attestation,
|
||||
prove_dim_eq, ProofAttestation, ProofEnvironment,
|
||||
};
|
||||
|
||||
#[cfg(feature = "economic")]
|
||||
use crate::config::EconomicConfig;
|
||||
#[cfg(feature = "economic")]
|
||||
use crate::error::{GraphTransformerError, Result};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// GameTheoreticAttention
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Game-theoretic attention via iterated best-response Nash equilibrium.
|
||||
///
|
||||
/// Each node is a player with a strategy (attention weight distribution).
|
||||
/// Iterated best response converges to a Nash equilibrium where no node
|
||||
/// can unilaterally improve its utility by changing attention weights.
|
||||
///
|
||||
/// Proof gate: convergence verification (max strategy delta < threshold).
|
||||
#[cfg(feature = "economic")]
|
||||
pub struct GameTheoreticAttention {
|
||||
config: EconomicConfig,
|
||||
dim: usize,
|
||||
env: ProofEnvironment,
|
||||
}
|
||||
|
||||
/// Result of game-theoretic attention computation.
|
||||
#[cfg(feature = "economic")]
|
||||
#[derive(Debug)]
|
||||
pub struct NashAttentionResult {
|
||||
/// Output features after Nash equilibrium attention.
|
||||
pub output: Vec<Vec<f32>>,
|
||||
/// Final attention weights (strategy profile).
|
||||
pub attention_weights: Vec<Vec<f32>>,
|
||||
/// Number of iterations to converge.
|
||||
pub iterations: usize,
|
||||
/// Maximum strategy delta at convergence.
|
||||
pub max_delta: f32,
|
||||
/// Whether the equilibrium converged.
|
||||
pub converged: bool,
|
||||
/// Proof attestation for convergence.
|
||||
pub attestation: Option<ProofAttestation>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "economic")]
|
||||
impl GameTheoreticAttention {
|
||||
/// Create a new game-theoretic attention module.
|
||||
pub fn new(dim: usize, config: EconomicConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
dim,
|
||||
env: ProofEnvironment::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute Nash equilibrium attention.
|
||||
///
|
||||
/// Uses iterated best response: each node updates its strategy to
|
||||
/// maximize utility given the current strategies of all other nodes.
|
||||
/// Utility = sum_j (w_ij * similarity(i,j)) - temperature * entropy(w_i)
|
||||
pub fn compute(
|
||||
&mut self,
|
||||
features: &[Vec<f32>],
|
||||
adjacency: &[(usize, usize)],
|
||||
) -> Result<NashAttentionResult> {
|
||||
let n = features.len();
|
||||
if n == 0 {
|
||||
return Ok(NashAttentionResult {
|
||||
output: Vec::new(),
|
||||
attention_weights: Vec::new(),
|
||||
iterations: 0,
|
||||
max_delta: 0.0,
|
||||
converged: true,
|
||||
attestation: None,
|
||||
});
|
||||
}
|
||||
|
||||
for feat in features {
|
||||
if feat.len() != self.dim {
|
||||
return Err(GraphTransformerError::DimensionMismatch {
|
||||
expected: self.dim,
|
||||
actual: feat.len(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Build adjacency set for fast lookup
|
||||
let mut adj_set = std::collections::HashSet::new();
|
||||
for &(u, v) in adjacency {
|
||||
if u < n && v < n {
|
||||
adj_set.insert((u, v));
|
||||
adj_set.insert((v, u));
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize uniform strategies
|
||||
let mut weights = vec![vec![0.0f32; n]; n];
|
||||
for i in 0..n {
|
||||
let neighbors: Vec<usize> = (0..n)
|
||||
.filter(|&j| j != i && adj_set.contains(&(i, j)))
|
||||
.collect();
|
||||
if !neighbors.is_empty() {
|
||||
let w = 1.0 / neighbors.len() as f32;
|
||||
for &j in &neighbors {
|
||||
weights[i][j] = w;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Precompute pairwise similarities
|
||||
let similarities = self.compute_similarities(features, n);
|
||||
|
||||
// Iterated best response
|
||||
let max_iterations = self.config.max_iterations;
|
||||
let temperature = self.config.temperature;
|
||||
let threshold = self.config.convergence_threshold;
|
||||
let mut iterations = 0;
|
||||
let mut max_delta = f32::MAX;
|
||||
|
||||
for iter in 0..max_iterations {
|
||||
let mut new_weights = vec![vec![0.0f32; n]; n];
|
||||
max_delta = 0.0f32;
|
||||
|
||||
for i in 0..n {
|
||||
// Best response for player i: softmax over utilities
|
||||
let neighbors: Vec<usize> = (0..n)
|
||||
.filter(|&j| j != i && adj_set.contains(&(i, j)))
|
||||
.collect();
|
||||
|
||||
if neighbors.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Compute utility-weighted logits
|
||||
let logits: Vec<f32> = neighbors
|
||||
.iter()
|
||||
.map(|&j| {
|
||||
let util = self.config.utility_weight * similarities[i][j];
|
||||
util / temperature
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Softmax
|
||||
let max_logit = logits.iter().copied().fold(f32::NEG_INFINITY, f32::max);
|
||||
let exp_logits: Vec<f32> = logits.iter().map(|&l| (l - max_logit).exp()).collect();
|
||||
let sum_exp: f32 = exp_logits.iter().sum();
|
||||
|
||||
for (idx, &j) in neighbors.iter().enumerate() {
|
||||
let new_w = if sum_exp > 1e-10 {
|
||||
exp_logits[idx] / sum_exp
|
||||
} else {
|
||||
1.0 / neighbors.len() as f32
|
||||
};
|
||||
let delta = (new_w - weights[i][j]).abs();
|
||||
max_delta = max_delta.max(delta);
|
||||
new_weights[i][j] = new_w;
|
||||
}
|
||||
}
|
||||
|
||||
weights = new_weights;
|
||||
iterations = iter + 1;
|
||||
|
||||
if max_delta < threshold {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let converged = max_delta < threshold;
|
||||
|
||||
// Proof gate: verify convergence
|
||||
let attestation = if converged {
|
||||
let dim_u32 = self.dim as u32;
|
||||
let proof_id = prove_dim_eq(&mut self.env, dim_u32, dim_u32)?;
|
||||
Some(create_attestation(&self.env, proof_id))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Compute output features using equilibrium weights
|
||||
let output = self.apply_attention(features, &weights, n);
|
||||
|
||||
Ok(NashAttentionResult {
|
||||
output,
|
||||
attention_weights: weights,
|
||||
iterations,
|
||||
max_delta,
|
||||
converged,
|
||||
attestation,
|
||||
})
|
||||
}
|
||||
|
||||
/// Compute pairwise cosine similarities.
|
||||
fn compute_similarities(&self, features: &[Vec<f32>], n: usize) -> Vec<Vec<f32>> {
|
||||
let mut sims = vec![vec![0.0f32; n]; n];
|
||||
for i in 0..n {
|
||||
let norm_i: f32 = features[i]
|
||||
.iter()
|
||||
.map(|x| x * x)
|
||||
.sum::<f32>()
|
||||
.sqrt()
|
||||
.max(1e-8);
|
||||
for j in (i + 1)..n {
|
||||
let norm_j: f32 = features[j]
|
||||
.iter()
|
||||
.map(|x| x * x)
|
||||
.sum::<f32>()
|
||||
.sqrt()
|
||||
.max(1e-8);
|
||||
let dot: f32 = features[i]
|
||||
.iter()
|
||||
.zip(features[j].iter())
|
||||
.map(|(a, b)| a * b)
|
||||
.sum();
|
||||
let sim = dot / (norm_i * norm_j);
|
||||
sims[i][j] = sim;
|
||||
sims[j][i] = sim;
|
||||
}
|
||||
}
|
||||
sims
|
||||
}
|
||||
|
||||
/// Apply attention weights to features.
|
||||
fn apply_attention(
|
||||
&self,
|
||||
features: &[Vec<f32>],
|
||||
weights: &[Vec<f32>],
|
||||
n: usize,
|
||||
) -> Vec<Vec<f32>> {
|
||||
let mut output = vec![vec![0.0f32; self.dim]; n];
|
||||
for i in 0..n {
|
||||
for j in 0..n {
|
||||
if weights[i][j] > 1e-10 {
|
||||
for d in 0..self.dim {
|
||||
output[i][d] += weights[i][j] * features[j][d];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
output
|
||||
}
|
||||
|
||||
/// Get the embedding dimension.
|
||||
pub fn dim(&self) -> usize {
|
||||
self.dim
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ShapleyAttention
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Shapley value attention for fair attribution.
///
/// Computes Monte Carlo Shapley values to determine each node's marginal
/// contribution to the coalition value (total attention score).
///
/// Proof gate: efficiency axiom -- Shapley values sum to coalition value.
#[cfg(feature = "economic")]
pub struct ShapleyAttention {
    /// Number of permutation samples for Monte Carlo estimation.
    /// Clamped to at least 1 by [`ShapleyAttention::new`].
    num_permutations: usize,
    /// Expected feature dimension of every input node vector.
    dim: usize,
    /// Proof environment used to discharge the efficiency-axiom proof gate.
    env: ProofEnvironment,
}
|
||||
|
||||
/// Result of Shapley attention computation.
#[cfg(feature = "economic")]
#[derive(Debug)]
pub struct ShapleyResult {
    /// Shapley values for each node (Monte Carlo estimates).
    pub shapley_values: Vec<f32>,
    /// Coalition value (total value of all nodes together).
    pub coalition_value: f32,
    /// Sum of Shapley values (should equal coalition_value).
    pub value_sum: f32,
    /// Whether the efficiency axiom holds (within tolerance: 10% of
    /// |coalition_value|, with an absolute floor of 0.1).
    pub efficiency_satisfied: bool,
    /// Output features weighted by Shapley values (normalized by the sum
    /// of absolute Shapley values).
    pub output: Vec<Vec<f32>>,
    /// Proof attestation for efficiency axiom; `None` when the axiom
    /// check failed.
    pub attestation: Option<ProofAttestation>,
}
|
||||
|
||||
#[cfg(feature = "economic")]
impl ShapleyAttention {
    /// Create a new Shapley attention module.
    ///
    /// `num_permutations` is clamped to at least 1 so [`ShapleyAttention::compute`]
    /// never divides by zero when averaging marginal contributions.
    pub fn new(dim: usize, num_permutations: usize) -> Self {
        Self {
            num_permutations: num_permutations.max(1),
            dim,
            env: ProofEnvironment::new(),
        }
    }

    /// Compute Shapley value attention.
    ///
    /// Uses Monte Carlo sampling of permutations to estimate Shapley
    /// values. The value function is the total squared feature magnitude
    /// of the coalition.
    ///
    /// # Errors
    ///
    /// Returns [`GraphTransformerError::DimensionMismatch`] if any feature
    /// vector's length differs from the configured `dim`.
    pub fn compute(
        &mut self,
        features: &[Vec<f32>],
        rng: &mut impl rand::Rng,
    ) -> Result<ShapleyResult> {
        let n = features.len();
        // Empty input: vacuously efficient, nothing to attribute.
        if n == 0 {
            return Ok(ShapleyResult {
                shapley_values: Vec::new(),
                coalition_value: 0.0,
                value_sum: 0.0,
                efficiency_satisfied: true,
                output: Vec::new(),
                attestation: None,
            });
        }

        // Validate dimensions up front so the sampling loop can index freely.
        for feat in features {
            if feat.len() != self.dim {
                return Err(GraphTransformerError::DimensionMismatch {
                    expected: self.dim,
                    actual: feat.len(),
                });
            }
        }

        // Coalition value: magnitude of aggregated features (grand coalition).
        let coalition_value = self.coalition_value(features, &(0..n).collect::<Vec<_>>());

        // Monte Carlo Shapley values: average marginal contribution over
        // uniformly random player orderings.
        let mut shapley_values = vec![0.0f32; n];
        let mut perm: Vec<usize> = (0..n).collect();

        for _ in 0..self.num_permutations {
            // Fisher-Yates shuffle. `perm` is not reset between samples;
            // reshuffling an already-shuffled permutation is still uniform.
            for i in (1..n).rev() {
                let j = rng.gen_range(0..=i);
                perm.swap(i, j);
            }

            let mut coalition: Vec<usize> = Vec::with_capacity(n);
            let mut prev_value = 0.0f32;

            // Each player's marginal contribution as it joins the growing
            // coalition in permutation order.
            for &player in &perm {
                coalition.push(player);
                let current_value = self.coalition_value(features, &coalition);
                let marginal = current_value - prev_value;
                shapley_values[player] += marginal;
                prev_value = current_value;
            }
        }

        // Average accumulated marginals over all sampled permutations.
        let num_perm_f32 = self.num_permutations as f32;
        for sv in &mut shapley_values {
            *sv /= num_perm_f32;
        }

        // Efficiency axiom: Shapley values should sum to the grand-coalition
        // value. Monte Carlo noise is tolerated up to 10% of |coalition_value|
        // (absolute floor 0.1).
        let value_sum: f32 = shapley_values.iter().sum();
        let efficiency_tolerance = 0.1 * coalition_value.abs().max(1.0);
        let efficiency_satisfied = (value_sum - coalition_value).abs() < efficiency_tolerance;

        // Proof gate: verify efficiency axiom. The attestation anchors on a
        // reflexive dim == dim proof; the numeric efficiency check above is
        // the actual gate condition.
        let attestation = if efficiency_satisfied {
            let dim_u32 = self.dim as u32;
            let proof_id = prove_dim_eq(&mut self.env, dim_u32, dim_u32)?;
            Some(create_attestation(&self.env, proof_id))
        } else {
            None
        };

        // Output features weighted by normalized |Shapley| values; the 1e-8
        // floor guards against division by zero when all values are ~0.
        let total_sv: f32 = shapley_values
            .iter()
            .map(|v| v.abs())
            .sum::<f32>()
            .max(1e-8);
        let mut output = vec![vec![0.0f32; self.dim]; n];
        for i in 0..n {
            let weight = shapley_values[i].abs() / total_sv;
            for d in 0..self.dim {
                output[i][d] = features[i][d] * weight;
            }
        }

        Ok(ShapleyResult {
            shapley_values,
            coalition_value,
            value_sum,
            efficiency_satisfied,
            output,
            attestation,
        })
    }

    /// Compute the value of a coalition (subset of nodes).
    ///
    /// Value = squared L2 norm of the aggregated feature vector.
    /// Out-of-range node indices and short feature rows are skipped
    /// defensively rather than panicking.
    fn coalition_value(&self, features: &[Vec<f32>], coalition: &[usize]) -> f32 {
        if coalition.is_empty() {
            return 0.0;
        }
        let mut agg = vec![0.0f32; self.dim];
        for &i in coalition {
            if i < features.len() {
                for d in 0..self.dim.min(features[i].len()) {
                    agg[d] += features[i][d];
                }
            }
        }
        agg.iter().map(|x| x * x).sum::<f32>()
    }

    /// Get the number of permutation samples.
    pub fn num_permutations(&self) -> usize {
        self.num_permutations
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// IncentiveAlignedMPNN
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Incentive-aligned message passing neural network.
///
/// Nodes must stake tokens to participate in message passing. Messages
/// are weighted by stake. Misbehavior (sending messages that violate
/// invariants) results in stake slashing.
///
/// Proof gate: stake sufficiency (Reflex tier).
#[cfg(feature = "economic")]
pub struct IncentiveAlignedMPNN {
    /// Expected feature dimension of every node vector.
    dim: usize,
    /// Minimum stake required to participate.
    /// Clamped non-negative by [`IncentiveAlignedMPNN::new`].
    min_stake: f32,
    /// Fraction of stake slashed on violation (0.0 to 1.0).
    /// Clamped into range by [`IncentiveAlignedMPNN::new`].
    slash_fraction: f32,
    /// Proof environment used to discharge the stake-sufficiency gate.
    env: ProofEnvironment,
}
|
||||
|
||||
/// Result of incentive-aligned message passing.
#[cfg(feature = "economic")]
#[derive(Debug)]
pub struct IncentiveResult {
    /// Updated node features after message passing.
    pub output: Vec<Vec<f32>>,
    /// Updated stakes after potential slashing (a node may be slashed
    /// multiplicatively once per offending edge).
    pub stakes: Vec<f32>,
    /// Nodes that were slashed (each listed at most once).
    pub slashed_nodes: Vec<usize>,
    /// Whether all participating nodes had sufficient stake.
    pub all_stakes_sufficient: bool,
    /// Tier decision for the stake sufficiency proof; `None` when some
    /// node lacked sufficient stake.
    pub tier_decision: Option<TierDecision>,
    /// Proof attestation for stake sufficiency; `None` when some node
    /// lacked sufficient stake.
    pub attestation: Option<ProofAttestation>,
}
|
||||
|
||||
#[cfg(feature = "economic")]
impl IncentiveAlignedMPNN {
    /// Create a new incentive-aligned MPNN.
    ///
    /// `min_stake` is clamped to be non-negative and `slash_fraction` to
    /// the `[0.0, 1.0]` range.
    pub fn new(dim: usize, min_stake: f32, slash_fraction: f32) -> Self {
        Self {
            dim,
            min_stake: min_stake.max(0.0),
            slash_fraction: slash_fraction.clamp(0.0, 1.0),
            env: ProofEnvironment::new(),
        }
    }

    /// Perform one round of incentive-aligned message passing.
    ///
    /// Steps:
    /// 1. Filter nodes with sufficient stake.
    /// 2. Compute stake-weighted messages along edges.
    /// 3. Validate messages (check for NaN/Inf).
    /// 4. Slash misbehaving nodes.
    /// 5. Aggregate messages into updated features.
    ///
    /// # Errors
    ///
    /// Returns [`GraphTransformerError::Config`] when `stakes` and
    /// `features` have different lengths, and
    /// [`GraphTransformerError::DimensionMismatch`] when any feature
    /// vector's length differs from the configured `dim`.
    pub fn step(
        &mut self,
        features: &[Vec<f32>],
        stakes: &[f32],
        adjacency: &[(usize, usize)],
    ) -> Result<IncentiveResult> {
        let n = features.len();
        if n != stakes.len() {
            return Err(GraphTransformerError::Config(format!(
                "stakes length mismatch: features={}, stakes={}",
                n,
                stakes.len(),
            )));
        }

        for feat in features {
            if feat.len() != self.dim {
                return Err(GraphTransformerError::DimensionMismatch {
                    expected: self.dim,
                    actual: feat.len(),
                });
            }
        }

        let mut updated_stakes = stakes.to_vec();
        let mut slashed_nodes = Vec::new();
        // Output starts as a copy of the inputs; messages accumulate on top.
        let mut output = features.to_vec();

        // Determine which nodes can participate (stake >= min_stake).
        let participating: Vec<bool> = stakes.iter().map(|&s| s >= self.min_stake).collect();

        // Compute messages along edges. Each undirected edge exchanges one
        // message in each direction.
        for &(u, v) in adjacency {
            // Silently skip malformed edges referencing non-existent nodes.
            if u >= n || v >= n {
                continue;
            }

            // Both endpoints must be participating.
            if !participating[u] || !participating[v] {
                continue;
            }

            // Compute stake-weighted message from u to v. NOTE: weights use
            // the ORIGINAL `stakes`, so slashing earlier in this pass does
            // not change later message weights.
            let stake_weight_u = stakes[u] / (stakes[u] + stakes[v]).max(1e-8);
            let stake_weight_v = stakes[v] / (stakes[u] + stakes[v]).max(1e-8);

            let msg_u_to_v: Vec<f32> = features[u].iter().map(|&x| x * stake_weight_u).collect();
            let msg_v_to_u: Vec<f32> = features[v].iter().map(|&x| x * stake_weight_v).collect();

            // Validate messages: every component must be finite (no NaN/Inf).
            let u_valid = msg_u_to_v.iter().all(|x| x.is_finite());
            let v_valid = msg_v_to_u.iter().all(|x| x.is_finite());

            if !u_valid {
                // Slash node u. Stake is multiplied per offending edge, so a
                // node with several bad edges is slashed repeatedly, but it
                // is recorded in `slashed_nodes` only once.
                updated_stakes[u] *= 1.0 - self.slash_fraction;
                if !slashed_nodes.contains(&u) {
                    slashed_nodes.push(u);
                }
            } else {
                // Aggregate valid message into v.
                for d in 0..self.dim {
                    output[v][d] += msg_u_to_v[d];
                }
            }

            if !v_valid {
                // Slash node v (same per-edge semantics as above).
                updated_stakes[v] *= 1.0 - self.slash_fraction;
                if !slashed_nodes.contains(&v) {
                    slashed_nodes.push(v);
                }
            } else {
                // Aggregate valid message into u.
                for d in 0..self.dim {
                    output[u][d] += msg_v_to_u[d];
                }
            }
        }

        let all_stakes_sufficient = participating.iter().all(|&p| p);

        // Proof gate: stake sufficiency via Reflex tier. The tiered proof
        // anchors on a reflexive dim == dim identity; the numeric stake
        // check above is the actual gate condition.
        let (tier_decision, attestation) = if all_stakes_sufficient {
            let decision = route_proof(ProofKind::Reflexivity, &self.env);
            let id_u32 = self.dim as u32;
            let proof_id = ruvector_verified::gated::verify_tiered(
                &mut self.env,
                id_u32,
                id_u32,
                decision.tier,
            )?;
            let att = create_attestation(&self.env, proof_id);
            (Some(decision), Some(att))
        } else {
            (None, None)
        };

        Ok(IncentiveResult {
            output,
            stakes: updated_stakes,
            slashed_nodes,
            all_stakes_sufficient,
            tier_decision,
            attestation,
        })
    }

    /// Get the minimum stake requirement.
    pub fn min_stake(&self) -> f32 {
        self.min_stake
    }

    /// Get the slash fraction.
    pub fn slash_fraction(&self) -> f32 {
        self.slash_fraction
    }

    /// Get the embedding dimension.
    pub fn dim(&self) -> usize {
        self.dim
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
#[cfg(feature = "economic")]
mod tests {
    use super::*;

    // -- GameTheoreticAttention tests --

    // Smoke test: valid triangle graph produces a full set of non-negative
    // attention weights.
    #[test]
    fn test_nash_attention_basic() {
        let config = EconomicConfig {
            utility_weight: 1.0,
            temperature: 1.0,
            convergence_threshold: 0.01,
            max_iterations: 100,
            min_stake: 1.0,
            slash_fraction: 0.1,
            num_permutations: 50,
        };
        let mut gta = GameTheoreticAttention::new(4, config);

        let features = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
        ];
        let edges = vec![(0, 1), (1, 2), (0, 2)];

        let result = gta.compute(&features, &edges).unwrap();
        assert_eq!(result.output.len(), 3);
        assert_eq!(result.attention_weights.len(), 3);
        assert!(result.iterations > 0);
        // Weights should be non-negative (they come from a softmax).
        for row in &result.attention_weights {
            for &w in row {
                assert!(w >= 0.0, "negative weight: {}", w);
            }
        }
    }

    // With a generous iteration budget the best-response iteration should
    // converge and therefore carry an attestation.
    #[test]
    fn test_nash_attention_converges() {
        let config = EconomicConfig {
            utility_weight: 1.0,
            temperature: 0.5,
            convergence_threshold: 0.001,
            max_iterations: 200,
            min_stake: 1.0,
            slash_fraction: 0.1,
            num_permutations: 50,
        };
        let mut gta = GameTheoreticAttention::new(2, config);

        let features = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![0.5, 0.5]];
        let edges = vec![(0, 1), (1, 2), (0, 2)];

        let result = gta.compute(&features, &edges).unwrap();
        // With sufficient iterations, should converge
        assert!(
            result.converged,
            "did not converge: max_delta={}",
            result.max_delta
        );
        assert!(result.attestation.is_some());
    }

    // Empty graph is a trivially-converged no-op.
    #[test]
    fn test_nash_attention_empty() {
        let config = EconomicConfig::default();
        let mut gta = GameTheoreticAttention::new(4, config);
        let result = gta.compute(&[], &[]).unwrap();
        assert!(result.output.is_empty());
        assert!(result.converged);
    }

    // Feature vectors of the wrong width are rejected before any work.
    #[test]
    fn test_nash_attention_dim_mismatch() {
        let config = EconomicConfig::default();
        let mut gta = GameTheoreticAttention::new(4, config);
        let features = vec![vec![1.0, 2.0]]; // dim 2 != 4
        let result = gta.compute(&features, &[]);
        assert!(result.is_err());
    }

    // -- ShapleyAttention tests --

    #[test]
    fn test_shapley_basic() {
        let mut shapley = ShapleyAttention::new(3, 100);
        let features = vec![
            vec![1.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0],
            vec![0.0, 0.0, 1.0],
        ];
        let mut rng = rand::thread_rng();

        let result = shapley.compute(&features, &mut rng).unwrap();
        assert_eq!(result.shapley_values.len(), 3);
        assert_eq!(result.output.len(), 3);
        // Coalition value should be positive
        assert!(result.coalition_value > 0.0);
    }

    // Efficiency axiom: Shapley values must sum (approximately) to the
    // grand-coalition value; 500 permutation samples keep MC noise small.
    #[test]
    fn test_shapley_efficiency_axiom() {
        let mut shapley = ShapleyAttention::new(2, 500);
        let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
        let mut rng = rand::thread_rng();

        let result = shapley.compute(&features, &mut rng).unwrap();
        // Efficiency: sum of Shapley values should approximately equal coalition value
        let tolerance = 0.1 * result.coalition_value.abs().max(1.0);
        assert!(
            (result.value_sum - result.coalition_value).abs() < tolerance,
            "efficiency violated: sum={}, coalition={}",
            result.value_sum,
            result.coalition_value,
        );
        assert!(result.efficiency_satisfied);
        assert!(result.attestation.is_some());
    }

    #[test]
    fn test_shapley_empty() {
        let mut shapley = ShapleyAttention::new(4, 10);
        let mut rng = rand::thread_rng();
        let result = shapley.compute(&[], &mut rng).unwrap();
        assert!(result.shapley_values.is_empty());
        assert!(result.efficiency_satisfied);
    }

    // A single node receives the entire coalition value (its marginal
    // contribution in every permutation is the whole value).
    #[test]
    fn test_shapley_single_node() {
        let mut shapley = ShapleyAttention::new(2, 50);
        let features = vec![vec![3.0, 4.0]];
        let mut rng = rand::thread_rng();

        let result = shapley.compute(&features, &mut rng).unwrap();
        assert_eq!(result.shapley_values.len(), 1);
        // Single node should get the full coalition value
        let expected_value = 3.0 * 3.0 + 4.0 * 4.0; // 25.0
        assert!(
            (result.shapley_values[0] - expected_value).abs() < 1.0,
            "single node Shapley: {}, expected ~{}",
            result.shapley_values[0],
            expected_value,
        );
    }

    #[test]
    fn test_shapley_dim_mismatch() {
        let mut shapley = ShapleyAttention::new(4, 10);
        let features = vec![vec![1.0, 2.0]]; // dim 2 != 4
        let mut rng = rand::thread_rng();
        let result = shapley.compute(&features, &mut rng);
        assert!(result.is_err());
    }

    // -- IncentiveAlignedMPNN tests --

    // All stakes above the minimum: no slashing, and the sufficiency proof
    // gate produces both a tier decision and an attestation.
    #[test]
    fn test_incentive_mpnn_basic() {
        let mut mpnn = IncentiveAlignedMPNN::new(3, 1.0, 0.1);

        let features = vec![
            vec![1.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0],
            vec![0.0, 0.0, 1.0],
        ];
        let stakes = vec![5.0, 5.0, 5.0];
        let edges = vec![(0, 1), (1, 2)];

        let result = mpnn.step(&features, &stakes, &edges).unwrap();
        assert_eq!(result.output.len(), 3);
        assert_eq!(result.stakes.len(), 3);
        assert!(result.slashed_nodes.is_empty());
        assert!(result.all_stakes_sufficient);
        assert!(result.attestation.is_some());
        assert!(result.tier_decision.is_some());
    }

    #[test]
    fn test_incentive_mpnn_insufficient_stake() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 5.0, 0.2);

        let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
        let stakes = vec![10.0, 1.0]; // node 1 below min_stake

        let edges = vec![(0, 1)];

        let result = mpnn.step(&features, &stakes, &edges).unwrap();
        // Node 1 doesn't participate -> no message exchange
        assert!(!result.all_stakes_sufficient);
        assert!(result.attestation.is_none());
    }

    #[test]
    fn test_incentive_mpnn_no_edges() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 1.0, 0.1);

        let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
        let stakes = vec![5.0, 5.0];
        let edges: Vec<(usize, usize)> = vec![];

        let result = mpnn.step(&features, &stakes, &edges).unwrap();
        // Without edges, output should equal input
        assert_eq!(result.output, features);
        assert!(result.slashed_nodes.is_empty());
    }

    #[test]
    fn test_incentive_mpnn_stake_weighted() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 0.1, 0.1);

        let features = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
        let stakes = vec![9.0, 1.0]; // node 0 has much higher stake

        let edges = vec![(0, 1)];
        let result = mpnn.step(&features, &stakes, &edges).unwrap();

        // Node 1's message to node 0 should be weighted less (low stake)
        // Node 0's message to node 1 should be weighted more (high stake)
        // Node 1's output should show more influence from node 0
        let node1_d0 = result.output[1][0];
        // Node 0 has stake_weight 0.9, so msg_0_to_1 = [0.9, 0.0]
        // Node 1 output = [0.0, 1.0] + [0.9, 0.0] = [0.9, 1.0]
        assert!(
            node1_d0 > 0.5,
            "node 1 should receive strong message from node 0: {}",
            node1_d0
        );
    }

    #[test]
    fn test_incentive_mpnn_stakes_length_mismatch() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 1.0, 0.1);
        let features = vec![vec![1.0, 2.0]];
        let stakes = vec![5.0, 5.0]; // mismatched
        let result = mpnn.step(&features, &stakes, &[]);
        assert!(result.is_err());
    }

    // Constructor clamps slash_fraction into [0.0, 1.0].
    #[test]
    fn test_incentive_mpnn_slash_fraction_bounds() {
        let mpnn = IncentiveAlignedMPNN::new(2, 0.0, 1.5);
        assert!((mpnn.slash_fraction() - 1.0).abs() < 1e-6);

        let mpnn2 = IncentiveAlignedMPNN::new(2, 0.0, -0.5);
        assert!((mpnn2.slash_fraction() - 0.0).abs() < 1e-6);
    }
}
|
||||
53
crates/ruvector-graph-transformer/src/error.rs
Normal file
53
crates/ruvector-graph-transformer/src/error.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
//! Error types for the graph transformer crate.
|
||||
//!
|
||||
//! Composes errors from all sub-crates into a unified error type.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
/// Unified error type for graph transformer operations.
///
/// Sub-crate errors are wrapped via `#[from]`, so the `?` operator
/// converts them automatically at call sites returning [`Result`].
#[derive(Debug, Error)]
pub enum GraphTransformerError {
    /// Verification error from ruvector-verified.
    #[error("verification error: {0}")]
    Verification(#[from] ruvector_verified::VerificationError),

    /// GNN layer error from ruvector-gnn.
    #[error("gnn error: {0}")]
    Gnn(#[from] ruvector_gnn::GnnError),

    /// Attention error from ruvector-attention.
    #[error("attention error: {0}")]
    Attention(#[from] ruvector_attention::AttentionError),

    /// MinCut error from ruvector-mincut.
    #[error("mincut error: {0}")]
    MinCut(#[from] ruvector_mincut::MinCutError),

    /// Proof gate violation: mutation attempted without valid proof.
    #[error("proof gate violation: {0}")]
    ProofGateViolation(String),

    /// Configuration error (invalid or inconsistent parameters).
    #[error("configuration error: {0}")]
    Config(String),

    /// Invariant violation detected during execution.
    #[error("invariant violation: {0}")]
    InvariantViolation(String),

    /// Dimension mismatch in graph transformer operations.
    #[error("dimension mismatch: expected {expected}, got {actual}")]
    DimensionMismatch {
        /// Expected dimension.
        expected: usize,
        /// Actual dimension.
        actual: usize,
    },

    /// Numerical error (NaN, Inf, or other instability).
    #[error("numerical error: {0}")]
    NumericalError(String),
}

/// Convenience result type for graph transformer operations.
pub type Result<T> = std::result::Result<T, GraphTransformerError>;
|
||||
174
crates/ruvector-graph-transformer/src/lib.rs
Normal file
174
crates/ruvector-graph-transformer/src/lib.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
//! Unified graph transformer with proof-gated mutation substrate.
|
||||
//!
|
||||
//! This crate composes existing RuVector crates through proof-gated mutation,
|
||||
//! providing a unified interface for graph neural network operations with
|
||||
//! formal verification guarantees.
|
||||
//!
|
||||
//! # Modules
|
||||
//!
|
||||
//! - [`proof_gated`]: Core proof-gated mutation types
|
||||
//! - [`sublinear_attention`]: O(n log n) attention via LSH and PPR sampling
|
||||
//! - [`physics`]: Hamiltonian graph networks with energy conservation proofs
|
||||
//! - [`biological`]: Spiking attention with STDP and Hebbian learning
|
||||
//! - [`self_organizing`]: Morphogenetic fields and L-system graph growth
|
||||
//! - [`verified_training`]: GNN training with per-step proof certificates
|
||||
//! - [`manifold`]: Product manifold attention on S^n x H^m x R^k
|
||||
//! - [`temporal`]: Causal temporal attention with Granger causality
|
||||
//! - [`economic`]: Game-theoretic, Shapley, and incentive-aligned attention
|
||||
//!
|
||||
//! # Feature Flags
|
||||
//!
|
||||
//! - `sublinear` (default): Sublinear attention mechanisms
|
||||
//! - `verified-training` (default): Verified training with certificates
|
||||
//! - `physics`: Hamiltonian graph networks
|
||||
//! - `biological`: Spiking and Hebbian attention
|
||||
//! - `self-organizing`: Morphogenetic fields and developmental programs
|
||||
//! - `manifold`: Product manifold attention
|
||||
//! - `temporal`: Causal temporal attention
|
||||
//! - `economic`: Game-theoretic and incentive-aligned attention
|
||||
//! - `full`: All features enabled
|
||||
|
||||
pub mod config;
|
||||
pub mod error;
|
||||
pub mod proof_gated;
|
||||
|
||||
#[cfg(feature = "sublinear")]
|
||||
pub mod sublinear_attention;
|
||||
|
||||
#[cfg(feature = "physics")]
|
||||
pub mod physics;
|
||||
|
||||
#[cfg(feature = "biological")]
|
||||
pub mod biological;
|
||||
|
||||
#[cfg(feature = "self-organizing")]
|
||||
pub mod self_organizing;
|
||||
|
||||
#[cfg(feature = "verified-training")]
|
||||
pub mod verified_training;
|
||||
|
||||
#[cfg(feature = "manifold")]
|
||||
pub mod manifold;
|
||||
|
||||
#[cfg(feature = "temporal")]
|
||||
pub mod temporal;
|
||||
|
||||
#[cfg(feature = "economic")]
|
||||
pub mod economic;
|
||||
|
||||
// Re-exports
|
||||
pub use config::GraphTransformerConfig;
|
||||
pub use error::{GraphTransformerError, Result};
|
||||
pub use proof_gated::{AttestationChain, ProofGate, ProofGatedMutation};
|
||||
|
||||
#[cfg(feature = "sublinear")]
|
||||
pub use sublinear_attention::SublinearGraphAttention;
|
||||
|
||||
#[cfg(feature = "physics")]
|
||||
pub use physics::{
|
||||
ConservativePdeAttention, GaugeEquivariantMP, GaugeOutput, HamiltonianGraphNet,
|
||||
HamiltonianOutput, HamiltonianState, LagrangianAttention, LagrangianOutput, PdeOutput,
|
||||
};
|
||||
|
||||
#[cfg(feature = "biological")]
|
||||
pub use biological::{
|
||||
BranchAssignment, DendriticAttention, EffectiveOperator, HebbianLayer, HebbianNormBound,
|
||||
HebbianRule, InhibitionStrategy, ScopeTransitionAttestation, SpikingGraphAttention,
|
||||
StdpEdgeUpdater,
|
||||
};
|
||||
|
||||
#[cfg(feature = "self-organizing")]
|
||||
pub use self_organizing::{DevelopmentalProgram, GraphCoarsener, MorphogeneticField};
|
||||
|
||||
#[cfg(feature = "verified-training")]
|
||||
pub use verified_training::{
|
||||
EnergyGateResult, InvariantStats, ProofClass, RollbackStrategy, TrainingCertificate,
|
||||
TrainingInvariant, TrainingStepResult, VerifiedTrainer,
|
||||
};
|
||||
|
||||
#[cfg(feature = "manifold")]
|
||||
pub use manifold::{
|
||||
CurvatureAdaptiveRouter, GeodesicMessagePassing, LieGroupEquivariantAttention, LieGroupType,
|
||||
ManifoldType, ProductManifoldAttention, RiemannianAdamOptimizer,
|
||||
};
|
||||
|
||||
#[cfg(feature = "temporal")]
|
||||
pub use temporal::{
|
||||
AttentionSnapshot, BatchModeToken, CausalGraphTransformer, ContinuousTimeODE, EdgeEventType,
|
||||
GrangerCausalityExtractor, GrangerCausalityResult, GrangerEdge, GrangerGraph, MaskStrategy,
|
||||
OdeOutput, RetrocausalAttention, SmoothedOutput, StorageTier, TemporalAttentionResult,
|
||||
TemporalEdgeEvent, TemporalEmbeddingStore,
|
||||
};
|
||||
|
||||
#[cfg(feature = "economic")]
|
||||
pub use economic::{GameTheoreticAttention, IncentiveAlignedMPNN, ShapleyAttention};
|
||||
|
||||
/// Unified graph transformer entry point.
///
/// Provides a single interface to all graph transformer modules,
/// configured through [`GraphTransformerConfig`].
pub struct GraphTransformer {
    /// Active configuration; exposed read-only via [`GraphTransformer::config`].
    config: GraphTransformerConfig,
}
|
||||
|
||||
impl GraphTransformer {
|
||||
/// Create a new graph transformer with the given configuration.
|
||||
pub fn new(config: GraphTransformerConfig) -> Self {
|
||||
Self { config }
|
||||
}
|
||||
|
||||
/// Create a graph transformer with default configuration.
|
||||
pub fn with_defaults() -> Self {
|
||||
Self {
|
||||
config: GraphTransformerConfig::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the configuration.
|
||||
pub fn config(&self) -> &GraphTransformerConfig {
|
||||
&self.config
|
||||
}
|
||||
|
||||
/// Get the embedding dimension.
|
||||
pub fn embed_dim(&self) -> usize {
|
||||
self.config.embed_dim
|
||||
}
|
||||
|
||||
/// Create a proof gate wrapping a value.
|
||||
pub fn create_gate<T>(&self, value: T) -> ProofGate<T> {
|
||||
ProofGate::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Default configuration: embed_dim 64 with proof gating enabled.
    #[test]
    fn test_graph_transformer_creation() {
        let gt = GraphTransformer::with_defaults();
        assert_eq!(gt.embed_dim(), 64);
        assert!(gt.config().proof_gated);
    }

    // Struct-update syntax overrides are reflected through the accessors.
    #[test]
    fn test_graph_transformer_custom_config() {
        let config = GraphTransformerConfig {
            embed_dim: 128,
            num_heads: 8,
            dropout: 0.2,
            proof_gated: false,
            ..Default::default()
        };
        let gt = GraphTransformer::new(config);
        assert_eq!(gt.embed_dim(), 128);
        assert!(!gt.config().proof_gated);
    }

    // A fresh gate exposes the wrapped value via read().
    #[test]
    fn test_create_gate() {
        let gt = GraphTransformer::with_defaults();
        let gate = gt.create_gate(vec![1.0, 2.0, 3.0]);
        assert_eq!(gate.read().len(), 3);
    }
}
|
||||
1738
crates/ruvector-graph-transformer/src/manifold.rs
Normal file
1738
crates/ruvector-graph-transformer/src/manifold.rs
Normal file
File diff suppressed because it is too large
Load Diff
1035
crates/ruvector-graph-transformer/src/physics.rs
Normal file
1035
crates/ruvector-graph-transformer/src/physics.rs
Normal file
File diff suppressed because it is too large
Load Diff
1156
crates/ruvector-graph-transformer/src/proof_gated.rs
Normal file
1156
crates/ruvector-graph-transformer/src/proof_gated.rs
Normal file
File diff suppressed because it is too large
Load Diff
1007
crates/ruvector-graph-transformer/src/self_organizing.rs
Normal file
1007
crates/ruvector-graph-transformer/src/self_organizing.rs
Normal file
File diff suppressed because it is too large
Load Diff
367
crates/ruvector-graph-transformer/src/sublinear_attention.rs
Normal file
367
crates/ruvector-graph-transformer/src/sublinear_attention.rs
Normal file
@@ -0,0 +1,367 @@
|
||||
//! Sublinear graph attention mechanisms.
|
||||
//!
|
||||
//! Provides O(n log n) attention computation through:
|
||||
//! - LSH-bucket attention: locality-sensitive hashing for sparse attention patterns
|
||||
//! - PPR-sampled attention: personalized PageRank for neighbor sampling
|
||||
//! - Spectral sparsification: graph-theoretic attention pruning
|
||||
//!
|
||||
//! Uses `ruvector-attention` for the core attention computation and
|
||||
//! `ruvector-mincut` for graph structure operations.
|
||||
|
||||
#[cfg(feature = "sublinear")]
|
||||
use ruvector_attention::{Attention, ScaledDotProductAttention};
|
||||
// ruvector_mincut is available for advanced sparsification strategies.
|
||||
|
||||
#[cfg(feature = "sublinear")]
|
||||
use crate::config::SublinearConfig;
|
||||
#[cfg(feature = "sublinear")]
|
||||
use crate::error::{GraphTransformerError, Result};
|
||||
|
||||
/// Sublinear graph attention using LSH buckets and PPR sampling.
///
/// Achieves O(n log n) attention by only attending to a subset of nodes
/// selected through locality-sensitive hashing and random walk sampling.
#[cfg(feature = "sublinear")]
pub struct SublinearGraphAttention {
    // Tuning knobs: LSH bucket count, PPR sample count, sparsification factor.
    config: SublinearConfig,
    // Dense attention kernel applied within each sparse candidate set.
    attention: ScaledDotProductAttention,
    // Expected feature dimension for every node vector.
    embed_dim: usize,
}
|
||||
|
||||
#[cfg(feature = "sublinear")]
impl SublinearGraphAttention {
    /// Create a new sublinear graph attention module for `embed_dim`-sized
    /// node features, configured by `config`.
    pub fn new(embed_dim: usize, config: SublinearConfig) -> Self {
        let attention = ScaledDotProductAttention::new(embed_dim);
        Self {
            config,
            attention,
            embed_dim,
        }
    }

    /// Ensure a feature dimension matches the configured embedding dimension.
    ///
    /// Shared by all attention entry points so they fail uniformly with
    /// `DimensionMismatch` instead of surfacing an opaque downstream error.
    fn validate_dim(&self, actual: usize) -> Result<()> {
        if actual != self.embed_dim {
            return Err(GraphTransformerError::DimensionMismatch {
                expected: self.embed_dim,
                actual,
            });
        }
        Ok(())
    }

    /// Compute LSH-bucket attention over node features.
    ///
    /// Hashes node features into buckets and computes attention only
    /// within each bucket, reducing complexity from O(n^2) to O(n * B)
    /// where B is the bucket size.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` if the first feature row's length differs
    /// from `embed_dim`, or propagates the underlying attention error.
    pub fn lsh_attention(&self, node_features: &[Vec<f32>]) -> Result<Vec<Vec<f32>>> {
        if node_features.is_empty() {
            return Ok(Vec::new());
        }

        let dim = node_features[0].len();
        self.validate_dim(dim)?;

        let n = node_features.len();
        let num_buckets = self.config.lsh_buckets.max(1);
        let mut buckets: Vec<Vec<usize>> = vec![Vec::new(); num_buckets];

        // Simple LSH: hash based on the bit patterns of the features.
        for (i, feat) in node_features.iter().enumerate() {
            let bucket = lsh_hash(feat, num_buckets);
            buckets[bucket].push(i);
        }

        // Compute attention within each bucket; singleton buckets pass
        // their feature through unchanged (no neighbors to attend to).
        let mut outputs = vec![vec![0.0f32; dim]; n];
        for bucket in &buckets {
            if bucket.len() < 2 {
                for &idx in bucket {
                    outputs[idx] = node_features[idx].clone();
                }
                continue;
            }

            for &query_idx in bucket {
                let query = &node_features[query_idx];
                // Attend to every other member of the same bucket.
                let keys: Vec<&[f32]> = bucket
                    .iter()
                    .filter(|&&i| i != query_idx)
                    .map(|&i| node_features[i].as_slice())
                    .collect();
                let values: Vec<&[f32]> = keys.clone();

                if keys.is_empty() {
                    outputs[query_idx] = query.clone();
                    continue;
                }

                let result = self
                    .attention
                    .compute(query, &keys, &values)
                    .map_err(GraphTransformerError::Attention)?;
                outputs[query_idx] = result;
            }
        }

        Ok(outputs)
    }

    /// Compute PPR-sampled attention.
    ///
    /// Uses Personalized PageRank (approximated by short random walks) to
    /// select the most relevant neighbors for each node, then computes
    /// attention over only those neighbors.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` on a feature-dimension mismatch, or
    /// propagates the underlying attention error.
    pub fn ppr_attention(
        &self,
        node_features: &[Vec<f32>],
        edges: &[(usize, usize, f64)],
    ) -> Result<Vec<Vec<f32>>> {
        if node_features.is_empty() {
            return Ok(Vec::new());
        }

        let dim = node_features[0].len();
        self.validate_dim(dim)?;
        let n = node_features.len();
        // At least one sample, at most n-1 distinct neighbors.
        let k = self.config.ppr_samples.min(n - 1).max(1);

        // Build an undirected adjacency list; out-of-range edges are ignored.
        let mut adj: Vec<Vec<usize>> = vec![Vec::new(); n];
        for &(u, v, _w) in edges {
            if u < n && v < n {
                adj[u].push(v);
                adj[v].push(u);
            }
        }

        // For each node, sample neighbors via short random walks.
        let mut outputs = vec![vec![0.0f32; dim]; n];
        let mut rng = rand::thread_rng();

        for i in 0..n {
            let sampled = ppr_sample(&adj, i, k, &mut rng);

            // Isolated nodes (or unlucky walks) pass through unchanged.
            if sampled.is_empty() {
                outputs[i] = node_features[i].clone();
                continue;
            }

            let query = &node_features[i];
            let keys: Vec<&[f32]> = sampled
                .iter()
                .map(|&j| node_features[j].as_slice())
                .collect();
            let values: Vec<&[f32]> = keys.clone();

            let result = self
                .attention
                .compute(query, &keys, &values)
                .map_err(GraphTransformerError::Attention)?;
            outputs[i] = result;
        }

        Ok(outputs)
    }

    /// Compute sparsified attention over the heaviest edges.
    ///
    /// NOTE(review): this approximates spectral sparsification by keeping,
    /// per node, the top fraction (`sparsification_factor`) of incident
    /// edges ranked by weight; true effective-resistance sampling is not
    /// implemented here.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` on a feature-dimension mismatch, or
    /// propagates the underlying attention error.
    pub fn spectral_attention(
        &self,
        node_features: &[Vec<f32>],
        edges: &[(usize, usize, f64)],
    ) -> Result<Vec<Vec<f32>>> {
        if node_features.is_empty() {
            return Ok(Vec::new());
        }

        let dim = node_features[0].len();
        self.validate_dim(dim)?;
        let n = node_features.len();
        let sparsity = self.config.sparsification_factor;

        // Build a weighted undirected adjacency list.
        let mut adj: Vec<Vec<(usize, f64)>> = vec![Vec::new(); n];
        for &(u, v, w) in edges {
            if u < n && v < n {
                adj[u].push((v, w));
                adj[v].push((u, w));
            }
        }

        // Sparsify: keep only the heaviest `sparsity` fraction of each
        // node's incident edges (always at least one when any exist).
        let mut outputs = vec![vec![0.0f32; dim]; n];
        for i in 0..n {
            let mut neighbors = adj[i].clone();
            // Descending by weight; NaN weights compare as equal.
            neighbors.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
            let keep = ((neighbors.len() as f32 * sparsity) as usize).max(1);
            neighbors.truncate(keep);

            // Only possible when the node had no incident edges at all.
            if neighbors.is_empty() {
                outputs[i] = node_features[i].clone();
                continue;
            }

            let query = &node_features[i];
            let keys: Vec<&[f32]> = neighbors
                .iter()
                .map(|&(j, _)| node_features[j].as_slice())
                .collect();
            let values: Vec<&[f32]> = keys.clone();

            let result = self
                .attention
                .compute(query, &keys, &values)
                .map_err(GraphTransformerError::Attention)?;
            outputs[i] = result;
        }

        Ok(outputs)
    }

    /// Get the embedding dimension.
    pub fn embed_dim(&self) -> usize {
        self.embed_dim
    }
}
|
||||
|
||||
/// Position-weighted bucket hash over the raw bit patterns of `features`.
///
/// Each `f32` is reinterpreted via `to_bits`, weighted by `index + 1`,
/// accumulated with wrapping addition, then mixed with a fixed odd
/// multiplier before reduction modulo `num_buckets`. Deterministic for a
/// given input; always returns a value in `0..num_buckets`.
#[cfg(feature = "sublinear")]
fn lsh_hash(features: &[f32], num_buckets: usize) -> usize {
    let mixed = features
        .iter()
        .enumerate()
        .fold(0u64, |acc, (i, &v)| {
            acc.wrapping_add((v.to_bits() as u64).wrapping_mul(i as u64 + 1))
        })
        .wrapping_mul(0x517cc1b727220a95);
    (mixed as usize) % num_buckets
}
|
||||
|
||||
/// Sample neighbors via short random walks (PPR approximation).
///
/// Runs up to `k * 4` walks of at most 10 hops from `source`; each walk's
/// endpoint (if distinct from `source`) is recorded. Returns at most `k`
/// distinct node indices. NOTE(review): because the teleport check runs
/// before the first hop, a walk may terminate at `source` and contribute
/// nothing, so fewer than `k` nodes (possibly zero) can be returned even
/// on connected graphs. Output order follows `HashSet` iteration and is
/// therefore unspecified.
#[cfg(feature = "sublinear")]
fn ppr_sample(adj: &[Vec<usize>], source: usize, k: usize, rng: &mut impl rand::Rng) -> Vec<usize> {
    use std::collections::HashSet;

    let alpha = 0.15; // teleportation probability
    let mut visited = HashSet::new();
    let max_walks = k * 4;

    for _ in 0..max_walks {
        // Stop early once enough distinct endpoints were collected.
        if visited.len() >= k {
            break;
        }

        let mut current = source;
        for _ in 0..10 {
            // Teleport: end this walk with probability alpha per hop.
            if rng.gen::<f64>() < alpha {
                break;
            }
            // Dead end: node with no outgoing edges.
            if adj[current].is_empty() {
                break;
            }
            // Step to a uniformly random neighbor.
            let idx = rng.gen_range(0..adj[current].len());
            current = adj[current][idx];
        }

        // Only record endpoints that actually left the source node.
        if current != source {
            visited.insert(current);
        }
    }

    visited.into_iter().collect()
}
|
||||
|
||||
#[cfg(test)]
#[cfg(feature = "sublinear")]
mod tests {
    use super::*;

    // LSH attention preserves node count and feature dimension.
    #[test]
    fn test_lsh_attention_basic() {
        let config = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 8,
            sparsification_factor: 0.5,
        };
        let attn = SublinearGraphAttention::new(8, config);

        let features = vec![vec![1.0; 8], vec![0.5; 8], vec![0.3; 8], vec![0.8; 8]];

        let result = attn.lsh_attention(&features);
        assert!(result.is_ok());
        let outputs = result.unwrap();
        assert_eq!(outputs.len(), 4);
        for out in &outputs {
            assert_eq!(out.len(), 8);
        }
    }

    // Empty input yields empty output rather than an error.
    #[test]
    fn test_lsh_attention_empty() {
        let config = SublinearConfig::default();
        let attn = SublinearGraphAttention::new(8, config);
        let result = attn.lsh_attention(&[]);
        assert!(result.is_ok());
        assert!(result.unwrap().is_empty());
    }

    // PPR attention over a 4-cycle returns one output per node.
    #[test]
    fn test_ppr_attention_basic() {
        let config = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 2,
            sparsification_factor: 0.5,
        };
        let attn = SublinearGraphAttention::new(4, config);

        let features = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
            vec![0.0, 0.0, 0.0, 1.0],
        ];
        let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0), (3, 0, 1.0)];

        let result = attn.ppr_attention(&features, &edges);
        assert!(result.is_ok());
        let outputs = result.unwrap();
        assert_eq!(outputs.len(), 4);
    }

    // Sparsified attention over a weighted triangle returns one output per node.
    #[test]
    fn test_spectral_attention_basic() {
        let config = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 4,
            sparsification_factor: 0.5,
        };
        let attn = SublinearGraphAttention::new(4, config);

        let features = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
        ];
        let edges = vec![(0, 1, 2.0), (1, 2, 1.0), (0, 2, 0.5)];

        let result = attn.spectral_attention(&features, &edges);
        assert!(result.is_ok());
        let outputs = result.unwrap();
        assert_eq!(outputs.len(), 3);
    }

    // Mismatched feature dimension is rejected with an error.
    #[test]
    fn test_dimension_mismatch() {
        let config = SublinearConfig::default();
        let attn = SublinearGraphAttention::new(8, config);
        let features = vec![vec![1.0; 4]]; // dim 4 != embed_dim 8
        let result = attn.lsh_attention(&features);
        assert!(result.is_err());
    }

    // Hashing the same input twice gives the same in-range bucket.
    #[test]
    fn test_lsh_hash_deterministic() {
        let f = vec![1.0, 2.0, 3.0, 4.0];
        let h1 = lsh_hash(&f, 16);
        let h2 = lsh_hash(&f, 16);
        assert_eq!(h1, h2);
        assert!(h1 < 16);
    }
}
|
||||
1855
crates/ruvector-graph-transformer/src/temporal.rs
Normal file
1855
crates/ruvector-graph-transformer/src/temporal.rs
Normal file
File diff suppressed because it is too large
Load Diff
1419
crates/ruvector-graph-transformer/src/verified_training.rs
Normal file
1419
crates/ruvector-graph-transformer/src/verified_training.rs
Normal file
File diff suppressed because it is too large
Load Diff
508
crates/ruvector-graph-transformer/tests/integration.rs
Normal file
508
crates/ruvector-graph-transformer/tests/integration.rs
Normal file
@@ -0,0 +1,508 @@
|
||||
//! Integration tests for ruvector-graph-transformer.
|
||||
//!
|
||||
//! Tests the composition of all modules through proof-gated operations.
|
||||
|
||||
use ruvector_graph_transformer::{
|
||||
AttestationChain, GraphTransformer, GraphTransformerConfig, ProofGate,
|
||||
};
|
||||
use ruvector_verified::{
|
||||
gated::{ProofKind, ProofTier},
|
||||
proof_store::create_attestation,
|
||||
ProofEnvironment,
|
||||
};
|
||||
|
||||
// ---- Proof-gated tests ----
|
||||
|
||||
// A freshly created gate exposes its wrapped value and carries no
// attestations until a verified mutation occurs.
#[test]
fn test_proof_gate_create_and_read() {
    let wrapped = ProofGate::new(42u32);
    assert_eq!(*wrapped.read(), 42);
    assert!(wrapped.attestation_chain().is_empty());
}
|
||||
|
||||
// A dimension proof with matching dims must let the mutation through and
// record exactly one attestation (sequence numbers start at 0).
#[test]
fn test_proof_gate_dim_mutation_succeeds() {
    let mut gate = ProofGate::new(vec![0.0f32; 128]);
    let result = gate.mutate_with_dim_proof(128, 128, |v| {
        v[0] = 42.0;
    });
    assert!(result.is_ok());
    assert_eq!(gate.read()[0], 42.0);
    assert_eq!(gate.attestation_chain().len(), 1);

    // Verify attestation
    let entry = gate.attestation_chain().latest().unwrap();
    assert_eq!(entry.sequence, 0);
    assert!(entry.attestation.verification_timestamp_ns > 0);
}
|
||||
|
||||
// A failed dimension proof must block the mutation entirely: the closure
// never runs, the value stays untouched, and no attestation is recorded.
#[test]
fn test_proof_gate_dim_mutation_fails_on_mismatch() {
    let mut guarded = ProofGate::new(vec![0.0f32; 64]);
    let outcome = guarded.mutate_with_dim_proof(128, 64, |v| {
        v[0] = 1.0; // should not execute
    });
    assert!(outcome.is_err());
    assert_eq!(guarded.read()[0], 0.0); // unchanged
    assert!(guarded.attestation_chain().is_empty());
}
|
||||
|
||||
// A routed proof of kind Reflexivity with equal dims should resolve to the
// cheapest tier (Reflex), apply the mutation, and yield an attestation.
#[test]
fn test_proof_gate_routed_mutation() {
    let mut gate = ProofGate::new(100i32);
    let result = gate.mutate_with_routed_proof(ProofKind::Reflexivity, 5, 5, |v| *v += 50);
    assert!(result.is_ok());
    let (decision, attestation) = result.unwrap();
    assert_eq!(decision.tier, ProofTier::Reflex);
    assert_eq!(*gate.read(), 150);
    assert!(attestation.verification_timestamp_ns > 0);
}
|
||||
|
||||
// A pipeline proof whose stage dimensions chain correctly
// (1->2, 2->3, 3->4) must allow the mutation to run.
#[test]
fn test_proof_gate_pipeline_mutation() {
    let mut gate = ProofGate::new(String::from("initial"));
    let stages = vec![
        ("embed".into(), 1u32, 2u32),
        ("align".into(), 2, 3),
        ("call".into(), 3, 4),
    ];
    let result = gate.mutate_with_pipeline_proof(&stages, |s| {
        *s = String::from("transformed");
    });
    assert!(result.is_ok());
    assert_eq!(gate.read().as_str(), "transformed");
}
|
||||
|
||||
// Appending ten attestations yields a chain that reports the right length,
// passes integrity verification, and has a non-zero rolling hash.
#[test]
fn test_attestation_chain_integrity() {
    let mut history = AttestationChain::new();
    let environment = ProofEnvironment::new();
    for seq in 0..10 {
        history.append(create_attestation(&environment, seq));
    }
    assert_eq!(history.len(), 10);
    assert!(history.verify_integrity());
    assert!(!history.is_empty());
    assert_ne!(history.chain_hash(), 0);
}
|
||||
|
||||
// ---- Sublinear attention tests ----
|
||||
|
||||
#[cfg(feature = "sublinear")]
mod sublinear_tests {
    use ruvector_graph_transformer::config::SublinearConfig;
    use ruvector_graph_transformer::SublinearGraphAttention;

    // Ten 8-dim nodes: LSH attention preserves count and dimension.
    #[test]
    fn test_lsh_attention_basic() {
        let config = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 8,
            sparsification_factor: 0.5,
        };
        let attn = SublinearGraphAttention::new(8, config);

        let features: Vec<Vec<f32>> = (0..10).map(|i| vec![i as f32 * 0.1; 8]).collect();

        let result = attn.lsh_attention(&features);
        assert!(result.is_ok());
        let outputs = result.unwrap();
        assert_eq!(outputs.len(), 10);
        for out in &outputs {
            assert_eq!(out.len(), 8);
        }
    }

    // PPR attention over a 5-cycle returns one output per node.
    #[test]
    fn test_ppr_attention_on_small_graph() {
        let config = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 3,
            sparsification_factor: 0.5,
        };
        let attn = SublinearGraphAttention::new(4, config);

        let features = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
            vec![0.0, 0.0, 0.0, 1.0],
            vec![0.5, 0.5, 0.0, 0.0],
        ];
        let edges = vec![
            (0, 1, 1.0),
            (1, 2, 1.0),
            (2, 3, 1.0),
            (3, 4, 1.0),
            (4, 0, 1.0),
        ];

        let result = attn.ppr_attention(&features, &edges);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().len(), 5);
    }

    // Sparsified attention over a weighted triangle succeeds.
    #[test]
    fn test_spectral_attention_on_small_graph() {
        let config = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 4,
            sparsification_factor: 0.5,
        };
        let attn = SublinearGraphAttention::new(4, config);

        let features = vec![
            vec![1.0, 0.5, 0.3, 0.1],
            vec![0.5, 1.0, 0.4, 0.2],
            vec![0.3, 0.4, 1.0, 0.5],
        ];
        let edges = vec![(0, 1, 2.0), (1, 2, 1.0), (0, 2, 0.5)];

        let result = attn.spectral_attention(&features, &edges);
        assert!(result.is_ok());
    }
}
|
||||
|
||||
// ---- Physics tests ----
|
||||
|
||||
#[cfg(feature = "physics")]
mod physics_tests {
    use ruvector_graph_transformer::config::PhysicsConfig;
    use ruvector_graph_transformer::HamiltonianGraphNet;

    // One leapfrog step with a small dt must keep total energy within the
    // configured tolerance and produce a conservation attestation.
    #[test]
    fn test_hamiltonian_step_energy_conservation() {
        let config = PhysicsConfig {
            dt: 0.001,
            leapfrog_steps: 1,
            energy_tolerance: 0.1,
        };
        let mut hgn = HamiltonianGraphNet::new(4, config);

        let features = vec![vec![0.1, 0.2, 0.3, 0.4], vec![0.4, 0.3, 0.2, 0.1]];
        let state = hgn.init_state(&features).unwrap();
        let edges = vec![(0, 1, 0.1)];

        let result = hgn.step(&state, &edges).unwrap();
        let energy_diff = (result.energy_after - result.energy_before).abs();
        assert!(
            energy_diff < 0.1,
            "energy not conserved: diff={}",
            energy_diff
        );
        assert!(result.energy_conserved);
        assert!(result.attestation.is_some());
    }
}
|
||||
|
||||
// ---- Biological tests ----
|
||||
|
||||
#[cfg(feature = "biological")]
mod biological_tests {
    use ruvector_graph_transformer::config::BiologicalConfig;
    use ruvector_graph_transformer::{HebbianLayer, SpikingGraphAttention};

    // One spiking step: output count matches node count and no synaptic
    // weight escapes the configured max_weight bound.
    #[test]
    fn test_spiking_attention_update() {
        let config = BiologicalConfig {
            tau_membrane: 10.0,
            threshold: 0.3,
            stdp_rate: 0.01,
            max_weight: 5.0,
        };
        let mut sga = SpikingGraphAttention::new(3, 4, config);

        let features = vec![
            vec![0.8, 0.6, 0.4, 0.2],
            vec![0.1, 0.2, 0.3, 0.4],
            vec![0.9, 0.7, 0.5, 0.3],
        ];
        let weights = vec![
            vec![0.0, 0.5, 0.3],
            vec![0.5, 0.0, 0.2],
            vec![0.3, 0.2, 0.0],
        ];
        let adjacency = vec![(0, 1), (1, 2), (0, 2)];

        let result = sga.step(&features, &weights, &adjacency).unwrap();
        assert_eq!(result.features.len(), 3);

        // Verify weight bounds
        for row in &result.weights {
            for &w in row {
                assert!(w.abs() <= 5.0, "weight {} exceeds bound", w);
            }
        }
    }

    // Saturated Hebbian updates (maximal pre/post activity, 100 rounds)
    // must still leave the weights inside the layer's declared bounds.
    #[test]
    fn test_hebbian_weight_bounds() {
        let hebb = HebbianLayer::new(4, 1.0, 2.0);
        let pre = vec![1.0, 1.0, 1.0, 1.0];
        let post = vec![1.0, 1.0, 1.0, 1.0];
        let mut weights = vec![0.0; 4];

        for _ in 0..100 {
            hebb.update(&pre, &post, &mut weights).unwrap();
        }
        assert!(hebb.verify_bounds(&weights));
    }
}
|
||||
|
||||
// ---- Self-organizing tests ----
|
||||
|
||||
#[cfg(feature = "self-organizing")]
mod self_organizing_tests {
    use ruvector_graph_transformer::config::SelfOrganizingConfig;
    use ruvector_graph_transformer::self_organizing::{GrowthRule, GrowthRuleKind};
    use ruvector_graph_transformer::{DevelopmentalProgram, MorphogeneticField};

    // Reaction-diffusion steps over a 5-cycle must keep both morphogen
    // concentrations bounded and attest every bounds-passing step.
    #[test]
    fn test_morphogenetic_step_topology_invariants() {
        let config = SelfOrganizingConfig {
            diffusion_rate: 0.05,
            reaction_rate: 0.04,
            max_growth_steps: 100,
            coherence_threshold: 0.0,
        };
        let mut field = MorphogeneticField::new(5, config);

        let edges = vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)];

        for _ in 0..5 {
            let result = field.step(&edges).unwrap();
            // Concentrations must remain bounded [0.0, 2.0]
            for &a in &result.activator {
                assert!(a >= 0.0 && a <= 2.0);
            }
            for &b in &result.inhibitor {
                assert!(b >= 0.0 && b <= 2.0);
            }
            // Bounds-passing step should produce attestation
            assert!(result.attestation.is_some());
        }
    }

    // A Branch rule with threshold 0.5 must add edges for the nodes whose
    // activator exceeds it (indices 0, 1, 3) and attest the growth step.
    #[test]
    fn test_developmental_growth_rules() {
        let rules = vec![GrowthRule {
            activator_threshold: 0.5,
            max_degree: 3,
            connection_weight: 1.0,
            kind: GrowthRuleKind::Branch,
        }];
        let mut program = DevelopmentalProgram::new(rules, 10);

        let activator = vec![0.8, 0.6, 0.2, 0.9];
        let degrees = vec![1, 1, 1, 1];
        let edges = vec![(0, 1), (2, 3)];

        let result = program.grow_step(&activator, &degrees, &edges).unwrap();
        assert!(result.edges_added > 0);
        assert!(result.attestation.is_some());
    }
}
|
||||
|
||||
// ---- Verified training tests ----
|
||||
|
||||
#[cfg(feature = "verified-training")]
mod verified_training_tests {
    use ruvector_gnn::RuvectorLayer;
    use ruvector_graph_transformer::config::VerifiedTrainingConfig;
    use ruvector_graph_transformer::{RollbackStrategy, TrainingInvariant, VerifiedTrainer};

    // A single training step within a generous weight-norm invariant must
    // commit its weights and emit a timestamped attestation certificate.
    #[test]
    fn test_verified_training_single_step_certificate() {
        let config = VerifiedTrainingConfig {
            lipschitz_bound: 100.0,
            verify_monotonicity: true,
            learning_rate: 0.001,
            ..Default::default()
        };
        let invariants = vec![TrainingInvariant::WeightNormBound {
            max_norm: 1000.0,
            rollback_strategy: RollbackStrategy::DeltaApply,
        }];
        let mut trainer = VerifiedTrainer::new(4, 8, config, invariants);

        let layer = RuvectorLayer::new(4, 8, 2, 0.0).unwrap();
        let features = vec![vec![1.0, 0.0, 0.0, 0.0]];
        let neighbors = vec![vec![]];
        let weights = vec![vec![]];
        let targets = vec![vec![0.0; 8]];

        let result = trainer.train_step(&features, &neighbors, &weights, &targets, &layer);
        assert!(result.is_ok());
        let result = result.unwrap();
        assert_eq!(result.step, 1);
        assert!(result.loss >= 0.0);
        assert!(result.weights_committed);
        assert!(result.attestation.verification_timestamp_ns > 0);
    }

    // Three consecutive steps must all commit and be tracked by the
    // trainer's step counter and per-step result history.
    #[test]
    fn test_verified_training_multiple_steps() {
        let config = VerifiedTrainingConfig {
            lipschitz_bound: 100.0,
            verify_monotonicity: false,
            learning_rate: 0.001,
            ..Default::default()
        };
        let invariants = vec![TrainingInvariant::WeightNormBound {
            max_norm: 1000.0,
            rollback_strategy: RollbackStrategy::DeltaApply,
        }];
        let mut trainer = VerifiedTrainer::new(4, 8, config, invariants);

        let layer = RuvectorLayer::new(4, 8, 2, 0.0).unwrap();

        for _ in 0..3 {
            let result = trainer
                .train_step(
                    &[vec![1.0; 4]],
                    &[vec![]],
                    &[vec![]],
                    &[vec![0.0; 8]],
                    &layer,
                )
                .unwrap();
            assert!(result.weights_committed);
        }

        assert_eq!(trainer.step_count(), 3);
        assert_eq!(trainer.step_results().len(), 3);
    }
}
|
||||
|
||||
// ---- Manifold tests ----
|
||||
|
||||
#[cfg(feature = "manifold")]
mod manifold_tests {
    use ruvector_graph_transformer::config::ManifoldConfig;
    use ruvector_graph_transformer::manifold::{hyperbolic_geodesic, spherical_geodesic};
    use ruvector_graph_transformer::ProductManifoldAttention;

    // The product manifold concatenates its three components (4+4+4 = 12)
    // and reports positive spherical, negative hyperbolic, and ~zero
    // Euclidean curvature.
    #[test]
    fn test_product_manifold_attention_curvature() {
        let config = ManifoldConfig {
            spherical_dim: 4,
            hyperbolic_dim: 4,
            euclidean_dim: 4,
            curvature: -1.0,
        };
        let mut attn = ProductManifoldAttention::new(config);
        assert_eq!(attn.total_dim(), 12);

        let query = vec![0.5; 12];
        let keys = vec![vec![0.3; 12], vec![0.7; 12]];
        let values = vec![vec![1.0; 12], vec![2.0; 12]];

        let result = attn.compute(&query, &keys, &values).unwrap();
        assert_eq!(result.output.len(), 12);

        // Verify curvatures
        assert!(result.curvatures.spherical > 0.0);
        assert!(result.curvatures.hyperbolic < 0.0);
        assert!((result.curvatures.euclidean).abs() < 1e-6);
    }

    // Orthogonal unit vectors on the sphere are a quarter-circle apart.
    #[test]
    fn test_spherical_geodesic_distance() {
        let a = vec![1.0, 0.0];
        let b = vec![0.0, 1.0];
        let dist = spherical_geodesic(&a, &b);
        assert!((dist - std::f32::consts::FRAC_PI_2).abs() < 1e-4);
    }

    // Distinct points at curvature -1 have a positive, finite distance.
    #[test]
    fn test_hyperbolic_geodesic_distance() {
        let a = vec![0.0, 0.0];
        let b = vec![0.1, 0.0];
        let dist = hyperbolic_geodesic(&a, &b, -1.0);
        assert!(dist > 0.0);
        assert!(dist.is_finite());
    }
}
|
||||
|
||||
// ---- Temporal tests ----
|
||||
|
||||
#[cfg(feature = "temporal")]
mod temporal_tests {
    use ruvector_graph_transformer::config::TemporalConfig;
    use ruvector_graph_transformer::CausalGraphTransformer;

    // Temporal attention over a 5-step sequence must produce one output
    // and one weight row per step, with no attention paid to the future.
    #[test]
    fn test_causal_attention_ordering() {
        let config = TemporalConfig {
            decay_rate: 0.9,
            max_lag: 10,
            granger_lags: 3,
        };
        let transformer = CausalGraphTransformer::new(4, config);

        let sequence = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
            vec![0.0, 0.0, 0.0, 1.0],
            vec![0.5, 0.5, 0.0, 0.0],
        ];

        let result = transformer.temporal_attention(&sequence).unwrap();
        assert_eq!(result.output.len(), 5);
        assert_eq!(result.attention_weights.len(), 5);

        // Verify causal ordering: no future attention
        assert!(transformer.verify_causal_ordering(&result.attention_weights));
    }

    // Granger causality on two synthetic sinusoidal channels must echo
    // back the requested source/target/lag and a non-negative F statistic.
    #[test]
    fn test_granger_causality_extraction() {
        let config = TemporalConfig {
            decay_rate: 0.9,
            max_lag: 5,
            granger_lags: 2,
        };
        let transformer = CausalGraphTransformer::new(4, config);

        let mut series = Vec::new();
        for t in 0..30 {
            let x = (t as f32 * 0.1).sin();
            let y = (t as f32 * 0.2).cos();
            series.push(vec![x, y, 0.0, 0.0]);
        }

        let result = transformer.granger_causality(&series, 0, 1).unwrap();
        assert_eq!(result.source, 0);
        assert_eq!(result.target, 1);
        assert_eq!(result.lags, 2);
        assert!(result.f_statistic >= 0.0);
    }
}
|
||||
|
||||
// ---- Integration: Composing multiple modules ----
|
||||
|
||||
// The unified entry point composes: a default-configured transformer
// (64-dim embeddings) can mint a gate that wraps an arbitrary payload.
#[test]
fn test_graph_transformer_unified_entry() {
    let transformer = GraphTransformer::new(GraphTransformerConfig::default());
    assert_eq!(transformer.embed_dim(), 64);

    let gated = transformer.create_gate(vec![1.0, 2.0, 3.0]);
    assert_eq!(gated.read().len(), 3);
}
|
||||
|
||||
// Five successive verified mutations must all apply, leave the counter at
// five, and yield an attestation chain of length five that still verifies.
#[test]
fn test_proof_gate_multiple_mutations() {
    let mut counter_gate = ProofGate::new(0u64);

    for dim in 1..=5u32 {
        assert!(counter_gate.mutate_with_dim_proof(dim, dim, |v| *v += 1).is_ok());
    }

    assert_eq!(*counter_gate.read(), 5);
    assert_eq!(counter_gate.attestation_chain().len(), 5);
    assert!(counter_gate.attestation_chain().verify_integrity());
}
|
||||
Reference in New Issue
Block a user