Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,287 @@
//! Configuration types for all graph transformer modules.
use serde::{Deserialize, Serialize};
/// Top-level configuration for the graph transformer.
///
/// Each feature-gated field is only compiled in when the corresponding
/// Cargo feature is enabled, so the struct layout (and its serialized
/// form) depends on the active feature set.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphTransformerConfig {
    /// Embedding dimension for node features.
    pub embed_dim: usize,
    /// Number of attention heads.
    pub num_heads: usize,
    /// Dropout rate (0.0 to 1.0).
    pub dropout: f32,
    /// Whether to enable proof gating on all mutations.
    pub proof_gated: bool,
    /// Sublinear attention configuration.
    #[cfg(feature = "sublinear")]
    pub sublinear: SublinearConfig,
    /// Physics module configuration.
    #[cfg(feature = "physics")]
    pub physics: PhysicsConfig,
    /// Biological module configuration.
    #[cfg(feature = "biological")]
    pub biological: BiologicalConfig,
    /// Self-organizing module configuration.
    #[cfg(feature = "self-organizing")]
    pub self_organizing: SelfOrganizingConfig,
    /// Verified training configuration.
    #[cfg(feature = "verified-training")]
    pub verified_training: VerifiedTrainingConfig,
    /// Manifold module configuration.
    #[cfg(feature = "manifold")]
    pub manifold: ManifoldConfig,
    /// Temporal module configuration.
    #[cfg(feature = "temporal")]
    pub temporal: TemporalConfig,
    /// Economic module configuration.
    #[cfg(feature = "economic")]
    pub economic: EconomicConfig,
}
impl Default for GraphTransformerConfig {
    // Defaults: 64-dim embeddings, 4 heads, 10% dropout, proof gating ON.
    // Every feature-gated sub-config delegates to its own `Default` impl.
    fn default() -> Self {
        Self {
            embed_dim: 64,
            num_heads: 4,
            dropout: 0.1,
            proof_gated: true,
            #[cfg(feature = "sublinear")]
            sublinear: SublinearConfig::default(),
            #[cfg(feature = "physics")]
            physics: PhysicsConfig::default(),
            #[cfg(feature = "biological")]
            biological: BiologicalConfig::default(),
            #[cfg(feature = "self-organizing")]
            self_organizing: SelfOrganizingConfig::default(),
            #[cfg(feature = "verified-training")]
            verified_training: VerifiedTrainingConfig::default(),
            #[cfg(feature = "manifold")]
            manifold: ManifoldConfig::default(),
            #[cfg(feature = "temporal")]
            temporal: TemporalConfig::default(),
            #[cfg(feature = "economic")]
            economic: EconomicConfig::default(),
        }
    }
}
/// Configuration for sublinear attention.
///
/// Tunes the approximation knobs (hashing, sampling, sparsification) that
/// trade accuracy for sub-quadratic attention cost.
#[cfg(feature = "sublinear")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SublinearConfig {
    /// Number of LSH buckets for locality-sensitive hashing.
    pub lsh_buckets: usize,
    /// Number of PPR random walk samples.
    pub ppr_samples: usize,
    /// Spectral sparsification factor (0.0 to 1.0).
    pub sparsification_factor: f32,
}
#[cfg(feature = "sublinear")]
impl Default for SublinearConfig {
    // 16 hash buckets, 32 PPR walk samples, keep half of the spectrum.
    fn default() -> Self {
        SublinearConfig { lsh_buckets: 16, ppr_samples: 32, sparsification_factor: 0.5 }
    }
}
/// Configuration for Hamiltonian graph networks.
///
/// Controls the symplectic integrator and its energy-drift tolerance.
#[cfg(feature = "physics")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PhysicsConfig {
    /// Time step for symplectic integration.
    pub dt: f32,
    /// Number of leapfrog steps per update.
    pub leapfrog_steps: usize,
    /// Energy conservation tolerance.
    pub energy_tolerance: f32,
}
#[cfg(feature = "physics")]
impl Default for PhysicsConfig {
    // dt = 0.01 with 10 leapfrog steps per update; 1e-4 energy tolerance.
    fn default() -> Self {
        PhysicsConfig { dt: 0.01, leapfrog_steps: 10, energy_tolerance: 1e-4 }
    }
}
/// Configuration for biological graph attention.
///
/// Parameters for leaky integrate-and-fire (LIF) dynamics and STDP
/// plasticity, with an explicit weight bound for stability proofs.
#[cfg(feature = "biological")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BiologicalConfig {
    /// Membrane time constant for LIF neurons.
    pub tau_membrane: f32,
    /// Spike threshold voltage.
    pub threshold: f32,
    /// STDP learning rate.
    pub stdp_rate: f32,
    /// Maximum weight bound for stability proofs.
    pub max_weight: f32,
}
#[cfg(feature = "biological")]
impl Default for BiologicalConfig {
    // 20.0 membrane time constant, unit spike threshold, 1% STDP learning
    // rate, and a weight ceiling of 5.0 for the stability proofs.
    fn default() -> Self {
        BiologicalConfig { tau_membrane: 20.0, threshold: 1.0, stdp_rate: 0.01, max_weight: 5.0 }
    }
}
/// Configuration for self-organizing modules.
///
/// Reaction-diffusion and growth parameters for morphogenetic behavior.
#[cfg(feature = "self-organizing")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SelfOrganizingConfig {
    /// Diffusion rate for morphogenetic fields.
    pub diffusion_rate: f32,
    /// Reaction rate for Turing patterns.
    pub reaction_rate: f32,
    /// Maximum growth steps in developmental programs.
    pub max_growth_steps: usize,
    /// Coherence threshold for topology maintenance.
    pub coherence_threshold: f32,
}
#[cfg(feature = "self-organizing")]
impl Default for SelfOrganizingConfig {
    // Slow reaction relative to diffusion (0.05 vs 0.1), a 100-step growth
    // budget, and a 0.8 coherence floor for topology maintenance.
    fn default() -> Self {
        SelfOrganizingConfig {
            diffusion_rate: 0.1,
            reaction_rate: 0.05,
            max_growth_steps: 100,
            coherence_threshold: 0.8,
        }
    }
}
/// Configuration for verified training (ADR-049 hardened).
///
/// Governs per-step invariant checking and the certificate-binding hashes
/// used to tie training runs to a dataset and build.
#[cfg(feature = "verified-training")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerifiedTrainingConfig {
    /// Maximum Lipschitz constant for weight updates.
    pub lipschitz_bound: f32,
    /// Whether to verify loss monotonicity at each step (legacy; prefer LossStabilityBound).
    pub verify_monotonicity: bool,
    /// Learning rate for training.
    pub learning_rate: f32,
    /// Whether the trainer operates in fail-closed mode (default: true).
    /// When true, invariant violations reject the step and discard the delta.
    /// When false, violations are logged but the step proceeds (degraded mode).
    pub fail_closed: bool,
    /// Warmup steps during which invariant violations are logged but
    /// do not trigger rollback. After warmup, the fail_closed policy applies.
    pub warmup_steps: u64,
    /// Optional dataset manifest hash for certificate binding.
    pub dataset_manifest_hash: Option<[u8; 32]>,
    /// Optional code/build hash for certificate binding.
    pub code_build_hash: Option<[u8; 32]>,
}
#[cfg(feature = "verified-training")]
impl Default for VerifiedTrainingConfig {
    // Fail-closed out of the box with no warmup grace period; certificate
    // binding hashes are opt-in, so both start as `None`.
    fn default() -> Self {
        VerifiedTrainingConfig {
            lipschitz_bound: 10.0,
            verify_monotonicity: true,
            learning_rate: 0.001,
            fail_closed: true,
            warmup_steps: 0,
            dataset_manifest_hash: None,
            code_build_hash: None,
        }
    }
}
/// Configuration for product manifold attention.
///
/// Splits the embedding across spherical, hyperbolic, and Euclidean
/// components of a product manifold S^n x H^m x R^k.
#[cfg(feature = "manifold")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManifoldConfig {
    /// Dimension of the spherical component S^n.
    pub spherical_dim: usize,
    /// Dimension of the hyperbolic component H^m.
    pub hyperbolic_dim: usize,
    /// Dimension of the Euclidean component R^k.
    pub euclidean_dim: usize,
    /// Curvature for the hyperbolic component (negative).
    pub curvature: f32,
}
#[cfg(feature = "manifold")]
impl Default for ManifoldConfig {
    // Equal 16-dim split across all three components, unit negative
    // curvature for the hyperbolic part.
    fn default() -> Self {
        ManifoldConfig { spherical_dim: 16, hyperbolic_dim: 16, euclidean_dim: 16, curvature: -1.0 }
    }
}
/// Configuration for causal temporal attention.
///
/// Decay and lag settings for causally-masked attention and Granger tests.
#[cfg(feature = "temporal")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TemporalConfig {
    /// Decay rate for temporal attention weights.
    pub decay_rate: f32,
    /// Maximum time lag for causal masking.
    pub max_lag: usize,
    /// Number of Granger causality lags to test.
    pub granger_lags: usize,
}
#[cfg(feature = "temporal")]
impl Default for TemporalConfig {
    // 0.9 decay per step, causal mask up to 10 lags, 5 Granger lags tested.
    fn default() -> Self {
        TemporalConfig { decay_rate: 0.9, max_lag: 10, granger_lags: 5 }
    }
}
/// Configuration for economic graph attention mechanisms.
///
/// Shared hyperparameters for the game-theoretic (Nash), Shapley, and
/// incentive-aligned (staking) attention modules.
#[cfg(feature = "economic")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EconomicConfig {
    /// Utility weight for game-theoretic attention.
    pub utility_weight: f32,
    /// Temperature for softmax in Nash equilibrium computation.
    pub temperature: f32,
    /// Convergence threshold for iterated best response.
    pub convergence_threshold: f32,
    /// Maximum iterations for Nash equilibrium.
    pub max_iterations: usize,
    /// Minimum stake for incentive-aligned MPNN.
    pub min_stake: f32,
    /// Fraction of stake slashed on misbehavior.
    pub slash_fraction: f32,
    /// Number of permutation samples for Shapley value estimation.
    pub num_permutations: usize,
}
#[cfg(feature = "economic")]
impl Default for EconomicConfig {
    // Unit utility weight and temperature; 1% convergence threshold under a
    // 100-iteration budget; stake floor 1.0 with 10% slashing; 100 Shapley
    // permutation samples.
    fn default() -> Self {
        EconomicConfig {
            utility_weight: 1.0,
            temperature: 1.0,
            convergence_threshold: 0.01,
            max_iterations: 100,
            min_stake: 1.0,
            slash_fraction: 0.1,
            num_permutations: 100,
        }
    }
}

View File

@@ -0,0 +1,864 @@
//! Economic graph attention mechanisms.
//!
//! Implements game-theoretic and mechanism-design approaches to graph
//! attention, including Nash equilibrium attention, Shapley value
//! attribution, and incentive-aligned message passing.
#[cfg(feature = "economic")]
use ruvector_verified::{
gated::{route_proof, ProofKind, TierDecision},
proof_store::create_attestation,
prove_dim_eq, ProofAttestation, ProofEnvironment,
};
#[cfg(feature = "economic")]
use crate::config::EconomicConfig;
#[cfg(feature = "economic")]
use crate::error::{GraphTransformerError, Result};
// ---------------------------------------------------------------------------
// GameTheoreticAttention
// ---------------------------------------------------------------------------
/// Game-theoretic attention via iterated best-response Nash equilibrium.
///
/// Each node is a player with a strategy (attention weight distribution).
/// Iterated best response converges to a Nash equilibrium where no node
/// can unilaterally improve its utility by changing attention weights.
///
/// Proof gate: convergence verification (max strategy delta < threshold).
#[cfg(feature = "economic")]
pub struct GameTheoreticAttention {
    // Economic hyperparameters (utility weight, temperature, iteration
    // budget, convergence threshold).
    config: EconomicConfig,
    // Expected length of every node feature vector.
    dim: usize,
    // Proof environment used to mint convergence attestations.
    env: ProofEnvironment,
}
/// Result of game-theoretic attention computation.
#[cfg(feature = "economic")]
#[derive(Debug)]
pub struct NashAttentionResult {
    /// Output features after Nash equilibrium attention.
    pub output: Vec<Vec<f32>>,
    /// Final attention weights (strategy profile), one row per node.
    pub attention_weights: Vec<Vec<f32>>,
    /// Number of iterations to converge.
    pub iterations: usize,
    /// Maximum strategy delta at convergence.
    pub max_delta: f32,
    /// Whether the equilibrium converged within the iteration budget.
    pub converged: bool,
    /// Proof attestation for convergence; `None` when not converged.
    pub attestation: Option<ProofAttestation>,
}
#[cfg(feature = "economic")]
impl GameTheoreticAttention {
    /// Create a new game-theoretic attention module with feature dimension
    /// `dim` and the given economic hyperparameters.
    pub fn new(dim: usize, config: EconomicConfig) -> Self {
        Self {
            config,
            dim,
            env: ProofEnvironment::new(),
        }
    }
    /// Compute Nash equilibrium attention.
    ///
    /// Uses iterated best response: each node updates its strategy to
    /// maximize utility given the current strategies of all other nodes.
    /// Utility = sum_j (w_ij * similarity(i,j)) - temperature * entropy(w_i)
    ///
    /// # Errors
    /// Returns [`GraphTransformerError::DimensionMismatch`] if any feature
    /// vector's length differs from the configured dimension.
    pub fn compute(
        &mut self,
        features: &[Vec<f32>],
        adjacency: &[(usize, usize)],
    ) -> Result<NashAttentionResult> {
        let n = features.len();
        if n == 0 {
            // Empty graph: trivially converged, nothing to attest.
            return Ok(NashAttentionResult {
                output: Vec::new(),
                attention_weights: Vec::new(),
                iterations: 0,
                max_delta: 0.0,
                converged: true,
                attestation: None,
            });
        }
        for feat in features {
            if feat.len() != self.dim {
                return Err(GraphTransformerError::DimensionMismatch {
                    expected: self.dim,
                    actual: feat.len(),
                });
            }
        }
        // Build an undirected adjacency set for O(1) neighbor lookup;
        // out-of-range endpoints are silently dropped.
        let mut adj_set = std::collections::HashSet::new();
        for &(u, v) in adjacency {
            if u < n && v < n {
                adj_set.insert((u, v));
                adj_set.insert((v, u));
            }
        }
        // Initialize uniform strategies over each node's neighbors;
        // isolated nodes keep an all-zero row.
        let mut weights = vec![vec![0.0f32; n]; n];
        for i in 0..n {
            let neighbors: Vec<usize> = (0..n)
                .filter(|&j| j != i && adj_set.contains(&(i, j)))
                .collect();
            if !neighbors.is_empty() {
                let w = 1.0 / neighbors.len() as f32;
                for &j in &neighbors {
                    weights[i][j] = w;
                }
            }
        }
        // Precompute pairwise similarities (constant across iterations).
        let similarities = self.compute_similarities(features, n);
        // Iterated best response
        let max_iterations = self.config.max_iterations;
        let temperature = self.config.temperature;
        let threshold = self.config.convergence_threshold;
        let mut iterations = 0;
        // Stays at MAX when max_iterations == 0, so `converged` is false.
        let mut max_delta = f32::MAX;
        for iter in 0..max_iterations {
            let mut new_weights = vec![vec![0.0f32; n]; n];
            max_delta = 0.0f32;
            for i in 0..n {
                // Best response for player i: softmax over utilities
                let neighbors: Vec<usize> = (0..n)
                    .filter(|&j| j != i && adj_set.contains(&(i, j)))
                    .collect();
                if neighbors.is_empty() {
                    continue;
                }
                // Compute utility-weighted logits
                let logits: Vec<f32> = neighbors
                    .iter()
                    .map(|&j| {
                        let util = self.config.utility_weight * similarities[i][j];
                        util / temperature
                    })
                    .collect();
                // Numerically stable softmax: shift by the max logit.
                let max_logit = logits.iter().copied().fold(f32::NEG_INFINITY, f32::max);
                let exp_logits: Vec<f32> = logits.iter().map(|&l| (l - max_logit).exp()).collect();
                let sum_exp: f32 = exp_logits.iter().sum();
                for (idx, &j) in neighbors.iter().enumerate() {
                    let new_w = if sum_exp > 1e-10 {
                        exp_logits[idx] / sum_exp
                    } else {
                        // Degenerate logits: fall back to a uniform strategy.
                        1.0 / neighbors.len() as f32
                    };
                    let delta = (new_w - weights[i][j]).abs();
                    max_delta = max_delta.max(delta);
                    new_weights[i][j] = new_w;
                }
            }
            weights = new_weights;
            iterations = iter + 1;
            if max_delta < threshold {
                break;
            }
        }
        let converged = max_delta < threshold;
        // Proof gate: only a converged equilibrium earns an attestation.
        let attestation = if converged {
            let dim_u32 = self.dim as u32;
            let proof_id = prove_dim_eq(&mut self.env, dim_u32, dim_u32)?;
            Some(create_attestation(&self.env, proof_id))
        } else {
            None
        };
        // Compute output features using equilibrium weights
        let output = self.apply_attention(features, &weights, n);
        Ok(NashAttentionResult {
            output,
            attention_weights: weights,
            iterations,
            max_delta,
            converged,
            attestation,
        })
    }
    /// Compute pairwise cosine similarities.
    ///
    /// Per-node norms are precomputed in one O(n * dim) pass instead of
    /// recomputing `norm_j` inside the pair loop (previously O(n^2 * dim)).
    /// The per-pair arithmetic is unchanged, so results are identical.
    fn compute_similarities(&self, features: &[Vec<f32>], n: usize) -> Vec<Vec<f32>> {
        // Clamp norms away from zero to avoid division blow-ups.
        let norms: Vec<f32> = features
            .iter()
            .map(|f| f.iter().map(|x| x * x).sum::<f32>().sqrt().max(1e-8))
            .collect();
        let mut sims = vec![vec![0.0f32; n]; n];
        for i in 0..n {
            for j in (i + 1)..n {
                let dot: f32 = features[i]
                    .iter()
                    .zip(features[j].iter())
                    .map(|(a, b)| a * b)
                    .sum();
                let sim = dot / (norms[i] * norms[j]);
                sims[i][j] = sim;
                sims[j][i] = sim;
            }
        }
        sims
    }
    /// Apply attention weights to features: output_i = sum_j w_ij * f_j.
    fn apply_attention(
        &self,
        features: &[Vec<f32>],
        weights: &[Vec<f32>],
        n: usize,
    ) -> Vec<Vec<f32>> {
        let mut output = vec![vec![0.0f32; self.dim]; n];
        for i in 0..n {
            for j in 0..n {
                // Skip negligible weights to avoid useless inner loops.
                if weights[i][j] > 1e-10 {
                    for d in 0..self.dim {
                        output[i][d] += weights[i][j] * features[j][d];
                    }
                }
            }
        }
        output
    }
    /// Get the embedding dimension.
    pub fn dim(&self) -> usize {
        self.dim
    }
}
// ---------------------------------------------------------------------------
// ShapleyAttention
// ---------------------------------------------------------------------------
/// Shapley value attention for fair attribution.
///
/// Computes Monte Carlo Shapley values to determine each node's marginal
/// contribution to the coalition value (total attention score).
///
/// Proof gate: efficiency axiom -- Shapley values sum to coalition value.
#[cfg(feature = "economic")]
pub struct ShapleyAttention {
    /// Number of permutation samples for Monte Carlo estimation.
    num_permutations: usize,
    // Expected length of every node feature vector.
    dim: usize,
    // Proof environment used to mint efficiency attestations.
    env: ProofEnvironment,
}
/// Result of Shapley attention computation.
#[cfg(feature = "economic")]
#[derive(Debug)]
pub struct ShapleyResult {
    /// Shapley values for each node.
    pub shapley_values: Vec<f32>,
    /// Coalition value (total value of all nodes together).
    pub coalition_value: f32,
    /// Sum of Shapley values (should equal coalition_value).
    pub value_sum: f32,
    /// Whether the efficiency axiom holds (within tolerance).
    pub efficiency_satisfied: bool,
    /// Output features weighted by normalized |Shapley| values.
    pub output: Vec<Vec<f32>>,
    /// Proof attestation for the efficiency axiom; `None` when violated.
    pub attestation: Option<ProofAttestation>,
}
#[cfg(feature = "economic")]
impl ShapleyAttention {
    /// Create a new Shapley attention module.
    ///
    /// `num_permutations` is clamped to at least 1 so the later division
    /// by the sample count is always well-defined.
    pub fn new(dim: usize, num_permutations: usize) -> Self {
        Self {
            num_permutations: num_permutations.max(1),
            dim,
            env: ProofEnvironment::new(),
        }
    }
    /// Compute Shapley value attention.
    ///
    /// Uses Monte Carlo sampling of permutations to estimate Shapley
    /// values. The value function is the total squared feature magnitude
    /// of the coalition.
    ///
    /// # Errors
    /// Returns a `DimensionMismatch` if any feature vector's length
    /// differs from the configured dimension.
    pub fn compute(
        &mut self,
        features: &[Vec<f32>],
        rng: &mut impl rand::Rng,
    ) -> Result<ShapleyResult> {
        let n = features.len();
        if n == 0 {
            // Empty input: efficiency holds vacuously, nothing to attest.
            return Ok(ShapleyResult {
                shapley_values: Vec::new(),
                coalition_value: 0.0,
                value_sum: 0.0,
                efficiency_satisfied: true,
                output: Vec::new(),
                attestation: None,
            });
        }
        for feat in features {
            if feat.len() != self.dim {
                return Err(GraphTransformerError::DimensionMismatch {
                    expected: self.dim,
                    actual: feat.len(),
                });
            }
        }
        // Coalition value: magnitude of aggregated features
        let coalition_value = self.coalition_value(features, &(0..n).collect::<Vec<_>>());
        // Monte Carlo Shapley values: average marginal contribution of each
        // player over random orderings.
        let mut shapley_values = vec![0.0f32; n];
        let mut perm: Vec<usize> = (0..n).collect();
        for _ in 0..self.num_permutations {
            // Fisher-Yates shuffle
            for i in (1..n).rev() {
                let j = rng.gen_range(0..=i);
                perm.swap(i, j);
            }
            // Grow the coalition in permutation order, crediting each
            // player with its marginal value contribution.
            let mut coalition: Vec<usize> = Vec::with_capacity(n);
            let mut prev_value = 0.0f32;
            for &player in &perm {
                coalition.push(player);
                let current_value = self.coalition_value(features, &coalition);
                let marginal = current_value - prev_value;
                shapley_values[player] += marginal;
                prev_value = current_value;
            }
        }
        // Average accumulated marginals over the number of samples.
        let num_perm_f32 = self.num_permutations as f32;
        for sv in &mut shapley_values {
            *sv /= num_perm_f32;
        }
        let value_sum: f32 = shapley_values.iter().sum();
        // Efficiency axiom check: 10% relative tolerance (floored at 0.1
        // absolute) to absorb f32 accumulation error.
        let efficiency_tolerance = 0.1 * coalition_value.abs().max(1.0);
        let efficiency_satisfied = (value_sum - coalition_value).abs() < efficiency_tolerance;
        // Proof gate: verify efficiency axiom
        let attestation = if efficiency_satisfied {
            let dim_u32 = self.dim as u32;
            let proof_id = prove_dim_eq(&mut self.env, dim_u32, dim_u32)?;
            Some(create_attestation(&self.env, proof_id))
        } else {
            None
        };
        // Compute output features weighted by normalized Shapley values
        // (absolute values, so negative contributions still carry weight).
        let total_sv: f32 = shapley_values
            .iter()
            .map(|v| v.abs())
            .sum::<f32>()
            .max(1e-8);
        let mut output = vec![vec![0.0f32; self.dim]; n];
        for i in 0..n {
            let weight = shapley_values[i].abs() / total_sv;
            for d in 0..self.dim {
                output[i][d] = features[i][d] * weight;
            }
        }
        Ok(ShapleyResult {
            shapley_values,
            coalition_value,
            value_sum,
            efficiency_satisfied,
            output,
            attestation,
        })
    }
    /// Compute the value of a coalition (subset of nodes).
    ///
    /// Value = squared L2 norm of the aggregated feature vector.
    /// Out-of-range indices and short feature rows are skipped defensively.
    fn coalition_value(&self, features: &[Vec<f32>], coalition: &[usize]) -> f32 {
        if coalition.is_empty() {
            return 0.0;
        }
        let mut agg = vec![0.0f32; self.dim];
        for &i in coalition {
            if i < features.len() {
                for d in 0..self.dim.min(features[i].len()) {
                    agg[d] += features[i][d];
                }
            }
        }
        agg.iter().map(|x| x * x).sum::<f32>()
    }
    /// Get the number of permutation samples.
    pub fn num_permutations(&self) -> usize {
        self.num_permutations
    }
}
// ---------------------------------------------------------------------------
// IncentiveAlignedMPNN
// ---------------------------------------------------------------------------
/// Incentive-aligned message passing neural network.
///
/// Nodes must stake tokens to participate in message passing. Messages
/// are weighted by stake. Misbehavior (sending messages that violate
/// invariants) results in stake slashing.
///
/// Proof gate: stake sufficiency (Reflex tier).
#[cfg(feature = "economic")]
pub struct IncentiveAlignedMPNN {
    // Expected length of every node feature vector.
    dim: usize,
    /// Minimum stake required to participate (clamped to >= 0 in `new`).
    min_stake: f32,
    /// Fraction of stake slashed on violation (clamped to 0.0..=1.0).
    slash_fraction: f32,
    // Proof environment used for tiered stake-sufficiency proofs.
    env: ProofEnvironment,
}
/// Result of incentive-aligned message passing.
#[cfg(feature = "economic")]
#[derive(Debug)]
pub struct IncentiveResult {
    /// Updated node features after message passing.
    pub output: Vec<Vec<f32>>,
    /// Updated stakes after potential slashing.
    pub stakes: Vec<f32>,
    /// Nodes that were slashed (deduplicated, in first-offense order).
    pub slashed_nodes: Vec<usize>,
    /// Whether all participating nodes had sufficient stake.
    pub all_stakes_sufficient: bool,
    /// Tier decision for the stake sufficiency proof; `None` when any
    /// stake was insufficient.
    pub tier_decision: Option<TierDecision>,
    /// Proof attestation for stake sufficiency; `None` when any stake
    /// was insufficient.
    pub attestation: Option<ProofAttestation>,
}
#[cfg(feature = "economic")]
impl IncentiveAlignedMPNN {
    /// Create a new incentive-aligned MPNN.
    ///
    /// `min_stake` is clamped to be non-negative; `slash_fraction` is
    /// clamped into [0.0, 1.0] so slashing can never increase a stake.
    pub fn new(dim: usize, min_stake: f32, slash_fraction: f32) -> Self {
        Self {
            dim,
            min_stake: min_stake.max(0.0),
            slash_fraction: slash_fraction.clamp(0.0, 1.0),
            env: ProofEnvironment::new(),
        }
    }
    /// Perform one round of incentive-aligned message passing.
    ///
    /// Steps:
    /// 1. Filter nodes with sufficient stake.
    /// 2. Compute stake-weighted messages along edges.
    /// 3. Validate messages (check for NaN/Inf).
    /// 4. Slash misbehaving nodes.
    /// 5. Aggregate messages into updated features.
    ///
    /// Notes: stake weights are always computed from the input `stakes`,
    /// not the post-slash values, and a node offending on multiple edges
    /// is slashed once per offending edge (but listed once in
    /// `slashed_nodes`). Edges with out-of-range endpoints are skipped.
    ///
    /// # Errors
    /// Returns a `Config` error on a features/stakes length mismatch, or a
    /// `DimensionMismatch` if any feature vector has the wrong length.
    pub fn step(
        &mut self,
        features: &[Vec<f32>],
        stakes: &[f32],
        adjacency: &[(usize, usize)],
    ) -> Result<IncentiveResult> {
        let n = features.len();
        if n != stakes.len() {
            return Err(GraphTransformerError::Config(format!(
                "stakes length mismatch: features={}, stakes={}",
                n,
                stakes.len(),
            )));
        }
        for feat in features {
            if feat.len() != self.dim {
                return Err(GraphTransformerError::DimensionMismatch {
                    expected: self.dim,
                    actual: feat.len(),
                });
            }
        }
        let mut updated_stakes = stakes.to_vec();
        let mut slashed_nodes = Vec::new();
        // Output starts as a copy of the inputs; messages accumulate on top.
        let mut output = features.to_vec();
        // Determine which nodes can participate
        let participating: Vec<bool> = stakes.iter().map(|&s| s >= self.min_stake).collect();
        // Compute messages along edges
        for &(u, v) in adjacency {
            if u >= n || v >= n {
                continue;
            }
            // Both must be participating
            if !participating[u] || !participating[v] {
                continue;
            }
            // Compute stake-weighted message from u to v; the shared
            // denominator is clamped to avoid division by zero.
            let stake_weight_u = stakes[u] / (stakes[u] + stakes[v]).max(1e-8);
            let stake_weight_v = stakes[v] / (stakes[u] + stakes[v]).max(1e-8);
            let msg_u_to_v: Vec<f32> = features[u].iter().map(|&x| x * stake_weight_u).collect();
            let msg_v_to_u: Vec<f32> = features[v].iter().map(|&x| x * stake_weight_v).collect();
            // Validate messages: any NaN/Inf component marks the sender
            // as misbehaving.
            let u_valid = msg_u_to_v.iter().all(|x| x.is_finite());
            let v_valid = msg_v_to_u.iter().all(|x| x.is_finite());
            if !u_valid {
                // Slash node u
                updated_stakes[u] *= 1.0 - self.slash_fraction;
                if !slashed_nodes.contains(&u) {
                    slashed_nodes.push(u);
                }
            } else {
                // Aggregate message into v
                for d in 0..self.dim {
                    output[v][d] += msg_u_to_v[d];
                }
            }
            if !v_valid {
                // Slash node v
                updated_stakes[v] *= 1.0 - self.slash_fraction;
                if !slashed_nodes.contains(&v) {
                    slashed_nodes.push(v);
                }
            } else {
                // Aggregate message into u
                for d in 0..self.dim {
                    output[u][d] += msg_v_to_u[d];
                }
            }
        }
        let all_stakes_sufficient = participating.iter().all(|&p| p);
        // Proof gate: stake sufficiency via Reflex tier; only attested
        // when every node met the stake floor.
        let (tier_decision, attestation) = if all_stakes_sufficient {
            let decision = route_proof(ProofKind::Reflexivity, &self.env);
            let id_u32 = self.dim as u32;
            let proof_id = ruvector_verified::gated::verify_tiered(
                &mut self.env,
                id_u32,
                id_u32,
                decision.tier,
            )?;
            let att = create_attestation(&self.env, proof_id);
            (Some(decision), Some(att))
        } else {
            (None, None)
        };
        Ok(IncentiveResult {
            output,
            stakes: updated_stakes,
            slashed_nodes,
            all_stakes_sufficient,
            tier_decision,
            attestation,
        })
    }
    /// Get the minimum stake requirement.
    pub fn min_stake(&self) -> f32 {
        self.min_stake
    }
    /// Get the slash fraction.
    pub fn slash_fraction(&self) -> f32 {
        self.slash_fraction
    }
    /// Get the embedding dimension.
    pub fn dim(&self) -> usize {
        self.dim
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
#[cfg(feature = "economic")]
mod tests {
    use super::*;
    // -- GameTheoreticAttention tests --
    // Shape and non-negativity of the equilibrium strategy profile.
    #[test]
    fn test_nash_attention_basic() {
        let config = EconomicConfig {
            utility_weight: 1.0,
            temperature: 1.0,
            convergence_threshold: 0.01,
            max_iterations: 100,
            min_stake: 1.0,
            slash_fraction: 0.1,
            num_permutations: 50,
        };
        let mut gta = GameTheoreticAttention::new(4, config);
        let features = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
        ];
        let edges = vec![(0, 1), (1, 2), (0, 2)];
        let result = gta.compute(&features, &edges).unwrap();
        assert_eq!(result.output.len(), 3);
        assert_eq!(result.attention_weights.len(), 3);
        assert!(result.iterations > 0);
        // Weights should be non-negative
        for row in &result.attention_weights {
            for &w in row {
                assert!(w >= 0.0, "negative weight: {}", w);
            }
        }
    }
    // Convergence within budget must also produce an attestation.
    #[test]
    fn test_nash_attention_converges() {
        let config = EconomicConfig {
            utility_weight: 1.0,
            temperature: 0.5,
            convergence_threshold: 0.001,
            max_iterations: 200,
            min_stake: 1.0,
            slash_fraction: 0.1,
            num_permutations: 50,
        };
        let mut gta = GameTheoreticAttention::new(2, config);
        let features = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![0.5, 0.5]];
        let edges = vec![(0, 1), (1, 2), (0, 2)];
        let result = gta.compute(&features, &edges).unwrap();
        // With sufficient iterations, should converge
        assert!(
            result.converged,
            "did not converge: max_delta={}",
            result.max_delta
        );
        assert!(result.attestation.is_some());
    }
    // Empty graph short-circuits as trivially converged.
    #[test]
    fn test_nash_attention_empty() {
        let config = EconomicConfig::default();
        let mut gta = GameTheoreticAttention::new(4, config);
        let result = gta.compute(&[], &[]).unwrap();
        assert!(result.output.is_empty());
        assert!(result.converged);
    }
    // Wrong feature dimension is rejected with an error.
    #[test]
    fn test_nash_attention_dim_mismatch() {
        let config = EconomicConfig::default();
        let mut gta = GameTheoreticAttention::new(4, config);
        let features = vec![vec![1.0, 2.0]]; // dim 2 != 4
        let result = gta.compute(&features, &[]);
        assert!(result.is_err());
    }
    // -- ShapleyAttention tests --
    // Basic shapes and a positive coalition value for non-zero features.
    #[test]
    fn test_shapley_basic() {
        let mut shapley = ShapleyAttention::new(3, 100);
        let features = vec![
            vec![1.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0],
            vec![0.0, 0.0, 1.0],
        ];
        let mut rng = rand::thread_rng();
        let result = shapley.compute(&features, &mut rng).unwrap();
        assert_eq!(result.shapley_values.len(), 3);
        assert_eq!(result.output.len(), 3);
        // Coalition value should be positive
        assert!(result.coalition_value > 0.0);
    }
    // Efficiency axiom: Shapley values must sum to the coalition value.
    #[test]
    fn test_shapley_efficiency_axiom() {
        let mut shapley = ShapleyAttention::new(2, 500);
        let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
        let mut rng = rand::thread_rng();
        let result = shapley.compute(&features, &mut rng).unwrap();
        // Efficiency: sum of Shapley values should approximately equal coalition value
        let tolerance = 0.1 * result.coalition_value.abs().max(1.0);
        assert!(
            (result.value_sum - result.coalition_value).abs() < tolerance,
            "efficiency violated: sum={}, coalition={}",
            result.value_sum,
            result.coalition_value,
        );
        assert!(result.efficiency_satisfied);
        assert!(result.attestation.is_some());
    }
    // Empty input short-circuits with a vacuously satisfied axiom.
    #[test]
    fn test_shapley_empty() {
        let mut shapley = ShapleyAttention::new(4, 10);
        let mut rng = rand::thread_rng();
        let result = shapley.compute(&[], &mut rng).unwrap();
        assert!(result.shapley_values.is_empty());
        assert!(result.efficiency_satisfied);
    }
    // A lone player is credited with the entire coalition value.
    #[test]
    fn test_shapley_single_node() {
        let mut shapley = ShapleyAttention::new(2, 50);
        let features = vec![vec![3.0, 4.0]];
        let mut rng = rand::thread_rng();
        let result = shapley.compute(&features, &mut rng).unwrap();
        assert_eq!(result.shapley_values.len(), 1);
        // Single node should get the full coalition value
        let expected_value = 3.0 * 3.0 + 4.0 * 4.0; // 25.0
        assert!(
            (result.shapley_values[0] - expected_value).abs() < 1.0,
            "single node Shapley: {}, expected ~{}",
            result.shapley_values[0],
            expected_value,
        );
    }
    // Wrong feature dimension is rejected with an error.
    #[test]
    fn test_shapley_dim_mismatch() {
        let mut shapley = ShapleyAttention::new(4, 10);
        let features = vec![vec![1.0, 2.0]]; // dim 2 != 4
        let mut rng = rand::thread_rng();
        let result = shapley.compute(&features, &mut rng);
        assert!(result.is_err());
    }
    // -- IncentiveAlignedMPNN tests --
    // Well-staked, well-behaved nodes: no slashing, attested proof.
    #[test]
    fn test_incentive_mpnn_basic() {
        let mut mpnn = IncentiveAlignedMPNN::new(3, 1.0, 0.1);
        let features = vec![
            vec![1.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0],
            vec![0.0, 0.0, 1.0],
        ];
        let stakes = vec![5.0, 5.0, 5.0];
        let edges = vec![(0, 1), (1, 2)];
        let result = mpnn.step(&features, &stakes, &edges).unwrap();
        assert_eq!(result.output.len(), 3);
        assert_eq!(result.stakes.len(), 3);
        assert!(result.slashed_nodes.is_empty());
        assert!(result.all_stakes_sufficient);
        assert!(result.attestation.is_some());
        assert!(result.tier_decision.is_some());
    }
    // A node below the stake floor excludes the edge and the attestation.
    #[test]
    fn test_incentive_mpnn_insufficient_stake() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 5.0, 0.2);
        let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
        let stakes = vec![10.0, 1.0]; // node 1 below min_stake
        let edges = vec![(0, 1)];
        let result = mpnn.step(&features, &stakes, &edges).unwrap();
        // Node 1 doesn't participate -> no message exchange
        assert!(!result.all_stakes_sufficient);
        assert!(result.attestation.is_none());
    }
    // No edges means the step is an identity on features.
    #[test]
    fn test_incentive_mpnn_no_edges() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 1.0, 0.1);
        let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
        let stakes = vec![5.0, 5.0];
        let edges: Vec<(usize, usize)> = vec![];
        let result = mpnn.step(&features, &stakes, &edges).unwrap();
        // Without edges, output should equal input
        assert_eq!(result.output, features);
        assert!(result.slashed_nodes.is_empty());
    }
    // Higher stake -> proportionally stronger outgoing message.
    #[test]
    fn test_incentive_mpnn_stake_weighted() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 0.1, 0.1);
        let features = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
        let stakes = vec![9.0, 1.0]; // node 0 has much higher stake
        let edges = vec![(0, 1)];
        let result = mpnn.step(&features, &stakes, &edges).unwrap();
        // Node 1's message to node 0 should be weighted less (low stake)
        // Node 0's message to node 1 should be weighted more (high stake)
        // Node 1's output should show more influence from node 0
        let node1_d0 = result.output[1][0];
        // Node 0 has stake_weight 0.9, so msg_0_to_1 = [0.9, 0.0]
        // Node 1 output = [0.0, 1.0] + [0.9, 0.0] = [0.9, 1.0]
        assert!(
            node1_d0 > 0.5,
            "node 1 should receive strong message from node 0: {}",
            node1_d0
        );
    }
    // Mismatched features/stakes lengths are rejected with an error.
    #[test]
    fn test_incentive_mpnn_stakes_length_mismatch() {
        let mut mpnn = IncentiveAlignedMPNN::new(2, 1.0, 0.1);
        let features = vec![vec![1.0, 2.0]];
        let stakes = vec![5.0, 5.0]; // mismatched
        let result = mpnn.step(&features, &stakes, &[]);
        assert!(result.is_err());
    }
    // Constructor clamps slash_fraction into [0.0, 1.0].
    #[test]
    fn test_incentive_mpnn_slash_fraction_bounds() {
        let mpnn = IncentiveAlignedMPNN::new(2, 0.0, 1.5);
        assert!((mpnn.slash_fraction() - 1.0).abs() < 1e-6);
        let mpnn2 = IncentiveAlignedMPNN::new(2, 0.0, -0.5);
        assert!((mpnn2.slash_fraction() - 0.0).abs() < 1e-6);
    }
}

View File

@@ -0,0 +1,53 @@
//! Error types for the graph transformer crate.
//!
//! Composes errors from all sub-crates into a unified error type.
use thiserror::Error;
/// Unified error type for graph transformer operations.
///
/// Sub-crate errors convert automatically via `#[from]`, so `?` can
/// propagate them across crate boundaries without manual wrapping.
#[derive(Debug, Error)]
pub enum GraphTransformerError {
    /// Verification error from ruvector-verified.
    #[error("verification error: {0}")]
    Verification(#[from] ruvector_verified::VerificationError),
    /// GNN layer error from ruvector-gnn.
    #[error("gnn error: {0}")]
    Gnn(#[from] ruvector_gnn::GnnError),
    /// Attention error from ruvector-attention.
    #[error("attention error: {0}")]
    Attention(#[from] ruvector_attention::AttentionError),
    /// MinCut error from ruvector-mincut.
    #[error("mincut error: {0}")]
    MinCut(#[from] ruvector_mincut::MinCutError),
    /// Proof gate violation: mutation attempted without valid proof.
    #[error("proof gate violation: {0}")]
    ProofGateViolation(String),
    /// Configuration error.
    #[error("configuration error: {0}")]
    Config(String),
    /// Invariant violation detected during execution.
    #[error("invariant violation: {0}")]
    InvariantViolation(String),
    /// Dimension mismatch in graph transformer operations.
    #[error("dimension mismatch: expected {expected}, got {actual}")]
    DimensionMismatch {
        /// Expected dimension.
        expected: usize,
        /// Actual dimension.
        actual: usize,
    },
    /// Numerical error (NaN, Inf, or other instability).
    #[error("numerical error: {0}")]
    NumericalError(String),
}
/// Convenience result type for graph transformer operations.
pub type Result<T> = std::result::Result<T, GraphTransformerError>;

View File

@@ -0,0 +1,174 @@
//! Unified graph transformer with proof-gated mutation substrate.
//!
//! This crate composes existing RuVector crates through proof-gated mutation,
//! providing a unified interface for graph neural network operations with
//! formal verification guarantees.
//!
//! # Modules
//!
//! - [`proof_gated`]: Core proof-gated mutation types
//! - [`sublinear_attention`]: O(n log n) attention via LSH and PPR sampling
//! - [`physics`]: Hamiltonian graph networks with energy conservation proofs
//! - [`biological`]: Spiking attention with STDP and Hebbian learning
//! - [`self_organizing`]: Morphogenetic fields and L-system graph growth
//! - [`verified_training`]: GNN training with per-step proof certificates
//! - [`manifold`]: Product manifold attention on S^n x H^m x R^k
//! - [`temporal`]: Causal temporal attention with Granger causality
//! - [`economic`]: Game-theoretic, Shapley, and incentive-aligned attention
//!
//! # Feature Flags
//!
//! - `sublinear` (default): Sublinear attention mechanisms
//! - `verified-training` (default): Verified training with certificates
//! - `physics`: Hamiltonian graph networks
//! - `biological`: Spiking and Hebbian attention
//! - `self-organizing`: Morphogenetic fields and developmental programs
//! - `manifold`: Product manifold attention
//! - `temporal`: Causal temporal attention
//! - `economic`: Game-theoretic and incentive-aligned attention
//! - `full`: All features enabled
pub mod config;
pub mod error;
pub mod proof_gated;
#[cfg(feature = "sublinear")]
pub mod sublinear_attention;
#[cfg(feature = "physics")]
pub mod physics;
#[cfg(feature = "biological")]
pub mod biological;
#[cfg(feature = "self-organizing")]
pub mod self_organizing;
#[cfg(feature = "verified-training")]
pub mod verified_training;
#[cfg(feature = "manifold")]
pub mod manifold;
#[cfg(feature = "temporal")]
pub mod temporal;
#[cfg(feature = "economic")]
pub mod economic;
// Re-exports
pub use config::GraphTransformerConfig;
pub use error::{GraphTransformerError, Result};
pub use proof_gated::{AttestationChain, ProofGate, ProofGatedMutation};
#[cfg(feature = "sublinear")]
pub use sublinear_attention::SublinearGraphAttention;
#[cfg(feature = "physics")]
pub use physics::{
ConservativePdeAttention, GaugeEquivariantMP, GaugeOutput, HamiltonianGraphNet,
HamiltonianOutput, HamiltonianState, LagrangianAttention, LagrangianOutput, PdeOutput,
};
#[cfg(feature = "biological")]
pub use biological::{
BranchAssignment, DendriticAttention, EffectiveOperator, HebbianLayer, HebbianNormBound,
HebbianRule, InhibitionStrategy, ScopeTransitionAttestation, SpikingGraphAttention,
StdpEdgeUpdater,
};
#[cfg(feature = "self-organizing")]
pub use self_organizing::{DevelopmentalProgram, GraphCoarsener, MorphogeneticField};
#[cfg(feature = "verified-training")]
pub use verified_training::{
EnergyGateResult, InvariantStats, ProofClass, RollbackStrategy, TrainingCertificate,
TrainingInvariant, TrainingStepResult, VerifiedTrainer,
};
#[cfg(feature = "manifold")]
pub use manifold::{
CurvatureAdaptiveRouter, GeodesicMessagePassing, LieGroupEquivariantAttention, LieGroupType,
ManifoldType, ProductManifoldAttention, RiemannianAdamOptimizer,
};
#[cfg(feature = "temporal")]
pub use temporal::{
AttentionSnapshot, BatchModeToken, CausalGraphTransformer, ContinuousTimeODE, EdgeEventType,
GrangerCausalityExtractor, GrangerCausalityResult, GrangerEdge, GrangerGraph, MaskStrategy,
OdeOutput, RetrocausalAttention, SmoothedOutput, StorageTier, TemporalAttentionResult,
TemporalEdgeEvent, TemporalEmbeddingStore,
};
#[cfg(feature = "economic")]
pub use economic::{GameTheoreticAttention, IncentiveAlignedMPNN, ShapleyAttention};
/// Unified graph transformer entry point.
///
/// Provides a single interface to all graph transformer modules,
/// configured through [`GraphTransformerConfig`].
pub struct GraphTransformer {
    // Active configuration; immutable after construction.
    config: GraphTransformerConfig,
}

impl GraphTransformer {
    /// Create a new graph transformer with the given configuration.
    pub fn new(config: GraphTransformerConfig) -> Self {
        Self { config }
    }

    /// Create a graph transformer with default configuration.
    pub fn with_defaults() -> Self {
        // Delegate to `new` so construction logic lives in one place.
        Self::new(GraphTransformerConfig::default())
    }

    /// Get the configuration.
    pub fn config(&self) -> &GraphTransformerConfig {
        &self.config
    }

    /// Get the embedding dimension.
    pub fn embed_dim(&self) -> usize {
        self.config.embed_dim
    }

    /// Create a proof gate wrapping a value.
    ///
    /// The returned [`ProofGate`] is the entry point for proof-gated
    /// mutation of `value`.
    pub fn create_gate<T>(&self, value: T) -> ProofGate<T> {
        ProofGate::new(value)
    }
}

impl Default for GraphTransformer {
    /// Equivalent to [`GraphTransformer::with_defaults`].
    fn default() -> Self {
        Self::with_defaults()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Default construction exposes the documented defaults.
    #[test]
    fn test_graph_transformer_creation() {
        let transformer = GraphTransformer::with_defaults();
        assert_eq!(transformer.embed_dim(), 64);
        assert!(transformer.config().proof_gated);
    }

    /// A caller-supplied config overrides the defaults it sets.
    #[test]
    fn test_graph_transformer_custom_config() {
        let custom = GraphTransformerConfig {
            embed_dim: 128,
            num_heads: 8,
            dropout: 0.2,
            proof_gated: false,
            ..Default::default()
        };
        let transformer = GraphTransformer::new(custom);
        assert_eq!(transformer.embed_dim(), 128);
        assert!(!transformer.config().proof_gated);
    }

    /// Gating a value preserves access to its contents via `read`.
    #[test]
    fn test_create_gate() {
        let transformer = GraphTransformer::with_defaults();
        let gated = transformer.create_gate(vec![1.0, 2.0, 3.0]);
        assert_eq!(gated.read().len(), 3);
    }
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,367 @@
//! Sublinear graph attention mechanisms.
//!
//! Provides O(n log n) attention computation through:
//! - LSH-bucket attention: locality-sensitive hashing for sparse attention patterns
//! - PPR-sampled attention: personalized PageRank for neighbor sampling
//! - Spectral sparsification: graph-theoretic attention pruning
//!
//! Uses `ruvector-attention` for the core attention computation and
//! `ruvector-mincut` for graph structure operations.
#[cfg(feature = "sublinear")]
use ruvector_attention::{Attention, ScaledDotProductAttention};
// ruvector_mincut is available for advanced sparsification strategies.
#[cfg(feature = "sublinear")]
use crate::config::SublinearConfig;
#[cfg(feature = "sublinear")]
use crate::error::{GraphTransformerError, Result};
/// Sublinear graph attention using LSH buckets and PPR sampling.
///
/// Achieves O(n log n) attention by only attending to a subset of nodes
/// selected through locality-sensitive hashing and random walk sampling.
#[cfg(feature = "sublinear")]
pub struct SublinearGraphAttention {
    // Tunables: LSH bucket count, PPR sample size, sparsification factor.
    config: SublinearConfig,
    // Core scaled dot-product attention applied within each selected subset.
    attention: ScaledDotProductAttention,
    // Expected feature dimension for every node vector.
    embed_dim: usize,
}
#[cfg(feature = "sublinear")]
impl SublinearGraphAttention {
    /// Create a new sublinear graph attention module.
    ///
    /// `embed_dim` is the expected node-feature width; `config` controls
    /// bucket count, PPR sample size, and the sparsification factor.
    pub fn new(embed_dim: usize, config: SublinearConfig) -> Self {
        let attention = ScaledDotProductAttention::new(embed_dim);
        Self {
            config,
            attention,
            embed_dim,
        }
    }

    /// Validate that node features match the configured embedding dimension.
    ///
    /// Returns the feature dimension on success. Callers must ensure
    /// `node_features` is non-empty before calling.
    ///
    /// # Errors
    /// [`GraphTransformerError::DimensionMismatch`] when the first node's
    /// width differs from `embed_dim`.
    fn validate_dim(&self, node_features: &[Vec<f32>]) -> Result<usize> {
        let dim = node_features[0].len();
        if dim != self.embed_dim {
            return Err(GraphTransformerError::DimensionMismatch {
                expected: self.embed_dim,
                actual: dim,
            });
        }
        Ok(dim)
    }

    /// Compute LSH-bucket attention over node features.
    ///
    /// Hashes node features into buckets and computes attention only
    /// within each bucket, reducing complexity from O(n^2) to O(n * B)
    /// where B is the bucket size.
    ///
    /// # Errors
    /// [`GraphTransformerError::DimensionMismatch`] on feature-width
    /// mismatch; attention-kernel errors are propagated.
    pub fn lsh_attention(&self, node_features: &[Vec<f32>]) -> Result<Vec<Vec<f32>>> {
        if node_features.is_empty() {
            return Ok(Vec::new());
        }
        let dim = self.validate_dim(node_features)?;
        let n = node_features.len();
        let num_buckets = self.config.lsh_buckets.max(1);
        let mut buckets: Vec<Vec<usize>> = vec![Vec::new(); num_buckets];
        // Simple LSH: hash each feature vector into one bucket.
        for (i, feat) in node_features.iter().enumerate() {
            buckets[lsh_hash(feat, num_buckets)].push(i);
        }
        // Compute attention within each bucket only.
        let mut outputs = vec![vec![0.0f32; dim]; n];
        for bucket in &buckets {
            if bucket.len() < 2 {
                // Singleton (or empty) bucket: pass features through unchanged.
                for &idx in bucket {
                    outputs[idx] = node_features[idx].clone();
                }
                continue;
            }
            for &query_idx in bucket {
                let query = &node_features[query_idx];
                // Attend to every other member of the bucket (self excluded).
                let keys: Vec<&[f32]> = bucket
                    .iter()
                    .filter(|&&i| i != query_idx)
                    .map(|&i| node_features[i].as_slice())
                    .collect();
                let values: Vec<&[f32]> = keys.clone();
                if keys.is_empty() {
                    outputs[query_idx] = query.clone();
                    continue;
                }
                let result = self
                    .attention
                    .compute(query, &keys, &values)
                    .map_err(GraphTransformerError::Attention)?;
                outputs[query_idx] = result;
            }
        }
        Ok(outputs)
    }

    /// Compute PPR-sampled attention.
    ///
    /// Uses Personalized PageRank to select the most relevant neighbors
    /// for each node, then computes attention over only those neighbors.
    /// Edges with an endpoint outside `0..n` are ignored.
    ///
    /// # Errors
    /// [`GraphTransformerError::DimensionMismatch`] on feature-width
    /// mismatch; attention-kernel errors are propagated.
    pub fn ppr_attention(
        &self,
        node_features: &[Vec<f32>],
        edges: &[(usize, usize, f64)],
    ) -> Result<Vec<Vec<f32>>> {
        if node_features.is_empty() {
            return Ok(Vec::new());
        }
        // Validate up front, consistently with `lsh_attention` (this path
        // previously skipped the check).
        let dim = self.validate_dim(node_features)?;
        let n = node_features.len();
        let k = self.config.ppr_samples.min(n - 1).max(1);
        // Build an undirected adjacency list; weights are unused for sampling.
        let mut adj: Vec<Vec<usize>> = vec![Vec::new(); n];
        for &(u, v, _w) in edges {
            if u < n && v < n {
                adj[u].push(v);
                adj[v].push(u);
            }
        }
        // For each node, sample neighbors via short random walks.
        let mut outputs = vec![vec![0.0f32; dim]; n];
        let mut rng = rand::thread_rng();
        for i in 0..n {
            let sampled = ppr_sample(&adj, i, k, &mut rng);
            if sampled.is_empty() {
                // Isolated node: pass its features through unchanged.
                outputs[i] = node_features[i].clone();
                continue;
            }
            let query = &node_features[i];
            let keys: Vec<&[f32]> = sampled
                .iter()
                .map(|&j| node_features[j].as_slice())
                .collect();
            let values: Vec<&[f32]> = keys.clone();
            let result = self
                .attention
                .compute(query, &keys, &values)
                .map_err(GraphTransformerError::Attention)?;
            outputs[i] = result;
        }
        Ok(outputs)
    }

    /// Compute spectrally sparsified attention.
    ///
    /// Uses the graph's spectral structure to prune attention weights,
    /// keeping only edges that contribute significantly to the graph's
    /// connectivity (measured via effective resistance).
    ///
    /// # Errors
    /// [`GraphTransformerError::DimensionMismatch`] on feature-width
    /// mismatch; attention-kernel errors are propagated.
    pub fn spectral_attention(
        &self,
        node_features: &[Vec<f32>],
        edges: &[(usize, usize, f64)],
    ) -> Result<Vec<Vec<f32>>> {
        if node_features.is_empty() {
            return Ok(Vec::new());
        }
        // Validate up front, consistently with `lsh_attention` (this path
        // previously skipped the check).
        let dim = self.validate_dim(node_features)?;
        let n = node_features.len();
        let sparsity = self.config.sparsification_factor;
        // Build a weighted undirected adjacency list.
        let mut adj: Vec<Vec<(usize, f64)>> = vec![Vec::new(); n];
        for &(u, v, w) in edges {
            if u < n && v < n {
                adj[u].push((v, w));
                adj[v].push((u, w));
            }
        }
        // Sparsify per node: keep only the heaviest fraction of incident edges.
        let mut outputs = vec![vec![0.0f32; dim]; n];
        for i in 0..n {
            // Select top neighbors by edge weight (descending).
            let mut neighbors = adj[i].clone();
            neighbors.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
            let keep = ((neighbors.len() as f32 * sparsity) as usize).max(1);
            neighbors.truncate(keep);
            if neighbors.is_empty() {
                // No incident edges: pass features through unchanged.
                outputs[i] = node_features[i].clone();
                continue;
            }
            let query = &node_features[i];
            let keys: Vec<&[f32]> = neighbors
                .iter()
                .map(|&(j, _)| node_features[j].as_slice())
                .collect();
            let values: Vec<&[f32]> = keys.clone();
            let result = self
                .attention
                .compute(query, &keys, &values)
                .map_err(GraphTransformerError::Attention)?;
            outputs[i] = result;
        }
        Ok(outputs)
    }

    /// Get the embedding dimension.
    pub fn embed_dim(&self) -> usize {
        self.embed_dim
    }
}
/// Deterministic hash of a feature vector into one of `num_buckets` buckets.
///
/// Mixes each value's raw bit pattern weighted by its position, then applies
/// a multiplicative finalizer before reducing modulo the bucket count, so
/// nearby indices with different values land in different buckets.
#[cfg(feature = "sublinear")]
fn lsh_hash(features: &[f32], num_buckets: usize) -> usize {
    let mixed = features.iter().enumerate().fold(0u64, |acc, (i, &v)| {
        acc.wrapping_add((v.to_bits() as u64).wrapping_mul(i as u64 + 1))
    });
    (mixed.wrapping_mul(0x517cc1b727220a95) as usize) % num_buckets
}
/// Sample neighbors via short random walks (PPR approximation).
///
/// Approximates personalized-PageRank neighbor sampling from `source`:
/// each walk follows random out-edges, terminating with probability
/// `alpha` per step (the teleport event), at a dead end, or after 10
/// hops; the walk's endpoint is recorded. Up to `k * 4` walks run until
/// `k` distinct endpoints (excluding `source` itself) are collected.
/// The RNG call sequence is part of the observable behavior for a seeded
/// `rng`, so the statement order here is deliberate.
#[cfg(feature = "sublinear")]
fn ppr_sample(adj: &[Vec<usize>], source: usize, k: usize, rng: &mut impl rand::Rng) -> Vec<usize> {
    use std::collections::HashSet;
    let alpha = 0.15; // teleportation probability
    // Distinct walk endpoints gathered so far.
    let mut visited = HashSet::new();
    // Cap total walks so sparse or disconnected graphs cannot loop forever.
    let max_walks = k * 4;
    for _ in 0..max_walks {
        if visited.len() >= k {
            break;
        }
        let mut current = source;
        // Walk at most 10 hops; stop early on teleport or dead end.
        for _ in 0..10 {
            if rng.gen::<f64>() < alpha {
                break;
            }
            if adj[current].is_empty() {
                break;
            }
            let idx = rng.gen_range(0..adj[current].len());
            current = adj[current][idx];
        }
        // Only endpoints other than the source count as sampled neighbors.
        if current != source {
            visited.insert(current);
        }
    }
    visited.into_iter().collect()
}
#[cfg(test)]
#[cfg(feature = "sublinear")]
mod tests {
    use super::*;

    /// LSH attention yields one output row per node, each of full width.
    #[test]
    fn test_lsh_attention_basic() {
        let cfg = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 8,
            sparsification_factor: 0.5,
        };
        let module = SublinearGraphAttention::new(8, cfg);
        let nodes = vec![vec![1.0; 8], vec![0.5; 8], vec![0.3; 8], vec![0.8; 8]];
        let result = module.lsh_attention(&nodes);
        assert!(result.is_ok());
        let rows = result.unwrap();
        assert_eq!(rows.len(), 4);
        assert!(rows.iter().all(|row| row.len() == 8));
    }

    /// An empty node set produces an empty output without error.
    #[test]
    fn test_lsh_attention_empty() {
        let module = SublinearGraphAttention::new(8, SublinearConfig::default());
        let result = module.lsh_attention(&[]);
        assert!(result.is_ok());
        assert!(result.unwrap().is_empty());
    }

    /// PPR attention over a 4-cycle returns one row per node.
    #[test]
    fn test_ppr_attention_basic() {
        let cfg = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 2,
            sparsification_factor: 0.5,
        };
        let module = SublinearGraphAttention::new(4, cfg);
        let nodes = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
            vec![0.0, 0.0, 0.0, 1.0],
        ];
        let ring = vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0), (3, 0, 1.0)];
        let result = module.ppr_attention(&nodes, &ring);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().len(), 4);
    }

    /// Spectral attention over a weighted triangle returns one row per node.
    #[test]
    fn test_spectral_attention_basic() {
        let cfg = SublinearConfig {
            lsh_buckets: 4,
            ppr_samples: 4,
            sparsification_factor: 0.5,
        };
        let module = SublinearGraphAttention::new(4, cfg);
        let nodes = vec![
            vec![1.0, 0.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0, 0.0],
            vec![0.0, 0.0, 1.0, 0.0],
        ];
        let triangle = vec![(0, 1, 2.0), (1, 2, 1.0), (0, 2, 0.5)];
        let result = module.spectral_attention(&nodes, &triangle);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().len(), 3);
    }

    /// Features narrower than the configured embed_dim are rejected.
    #[test]
    fn test_dimension_mismatch() {
        let module = SublinearGraphAttention::new(8, SublinearConfig::default());
        let narrow = vec![vec![1.0; 4]]; // dim 4 != embed_dim 8
        assert!(module.lsh_attention(&narrow).is_err());
    }

    /// Hashing the same vector twice gives the same in-range bucket.
    #[test]
    fn test_lsh_hash_deterministic() {
        let feat = vec![1.0, 2.0, 3.0, 4.0];
        let first = lsh_hash(&feat, 16);
        assert_eq!(first, lsh_hash(&feat, 16));
        assert!(first < 16);
    }
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff