Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,68 @@
[package]
name = "spiking-network"
version = "0.1.0"
edition = "2021"
rust-version = "1.77"
license = "MIT"
authors = ["Ruvector Team"]
description = "Event-driven spiking neural network for ASIC-optimized neuromorphic computing"
readme = "docs/README.md"
[dependencies]
# Core ruvector dependencies
ruvector-core = { path = "../../crates/ruvector-core", default-features = false }
ruvector-gnn = { path = "../../crates/ruvector-gnn", default-features = false }
# Math and numerics
ndarray = { version = "0.16", features = ["serde"] }
rand = "0.8"
rand_distr = "0.4"
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# Error handling
thiserror = "2.0"
anyhow = "1.0"
# Performance
rayon = "1.10"
parking_lot = "0.12"
dashmap = "6.1"
# Collections for sparse operations
indexmap = { version = "2.0", features = ["serde"] }
smallvec = { version = "1.11", features = ["serde"] }
# Bitsets for spike encoding
bitvec = { version = "1.0", features = ["serde"] }
# Priority queue for event scheduling
priority-queue = "2.0"
# Benchmark and property-testing harnesses (tests/benches only)
[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports"] }
proptest = "1.5"
# Feature flags: "simd" is on by default; "wasm" and "visualization" are opt-in
[features]
default = ["simd"]
simd = []
wasm = []
visualization = []
# Criterion benchmark entry point (harness = false hands main() to criterion)
[[bench]]
name = "spiking_bench"
harness = false
# Runnable examples living under src/examples/
[[example]]
name = "edge_detection"
path = "src/examples/edge_detection.rs"
[[example]]
name = "pattern_recognition"
path = "src/examples/pattern_recognition.rs"
[[example]]
name = "asic_simulation"
path = "src/examples/asic_simulation.rs"

View File

@@ -0,0 +1,388 @@
//! Spike encoding and decoding utilities.
//!
//! This module provides methods to convert between analog signals and sparse spike trains.
//!
//! ## Encoding Schemes
//!
//! - **Rate coding**: Spike frequency encodes magnitude
//! - **Temporal coding**: Spike timing encodes information
//! - **Population coding**: Distributed representation across neurons
//! - **Delta modulation**: Spikes encode changes only
//!
//! ## ASIC Benefits
//!
//! Sparse spike representations dramatically reduce:
//! - Memory bandwidth (only store/transmit active spikes)
//! - Computation (skip silent neurons entirely)
//! - Power consumption (event-driven processing)
use bitvec::prelude::*;
use serde::{Deserialize, Serialize};
use smallvec::SmallVec;
/// A single spike event with source and timing.
///
/// Events are small and `Copy` so they can be passed through queues and
/// routers by value; this matches the crate's "tiny events" design goal.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct SpikeEvent {
    /// Source neuron index
    pub source: u32,
    /// Timestamp in simulation time units
    pub time: f32,
    /// Optional payload (for routing or plasticity); 0 when unused
    pub payload: u8,
}
impl SpikeEvent {
    /// Construct a spike from `source` at `time` with an empty (zero) payload.
    pub fn new(source: u32, time: f32) -> Self {
        Self::with_payload(source, time, 0)
    }

    /// Construct a spike carrying an explicit routing/plasticity payload.
    pub fn with_payload(source: u32, time: f32, payload: u8) -> Self {
        Self { source, time, payload }
    }
}
/// A train of spikes from a single neuron over time.
///
/// Callers are expected to append spikes in chronological order; `isis()`
/// assumes `times` is sorted ascending.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpikeTrain {
    /// Neuron ID
    pub neuron_id: u32,
    /// Spike times (sorted ascending)
    pub times: Vec<f32>,
}
impl SpikeTrain {
    /// Create an empty spike train owned by `neuron_id`.
    pub fn new(neuron_id: u32) -> Self {
        Self { neuron_id, times: Vec::new() }
    }

    /// Append a spike timestamp; the caller keeps the train in order.
    pub fn add_spike(&mut self, time: f32) {
        self.times.push(time);
    }

    /// Number of spikes recorded so far.
    pub fn spike_count(&self) -> usize {
        self.times.len()
    }

    /// Mean firing rate in Hz over `duration` (milliseconds).
    /// A non-positive duration yields 0.0 instead of dividing by zero.
    pub fn firing_rate(&self, duration: f32) -> f32 {
        if duration <= 0.0 {
            return 0.0;
        }
        // spikes per ms, scaled to spikes per second (Hz)
        self.times.len() as f32 / duration * 1000.0
    }

    /// Inter-spike intervals: the gap between each consecutive pair of
    /// spikes. Empty for trains with fewer than two spikes.
    pub fn isis(&self) -> Vec<f32> {
        self.times
            .iter()
            .zip(self.times.iter().skip(1))
            .map(|(earlier, later)| later - earlier)
            .collect()
    }
}
/// Sparse spike matrix for population activity.
///
/// Uses compressed representation - only stores active spikes.
/// Conceptually a `num_neurons x num_timesteps` binary matrix, but only the
/// set entries are materialized as `SpikeEvent`s.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SparseSpikes {
    /// Number of neurons in population
    pub num_neurons: u32,
    /// Number of time bins
    pub num_timesteps: u32,
    /// Spike events (sorted by time)
    pub events: Vec<SpikeEvent>,
}
impl SparseSpikes {
    /// Create an empty spike matrix of the given logical dimensions.
    pub fn new(num_neurons: u32, num_timesteps: u32) -> Self {
        Self {
            num_neurons,
            num_timesteps,
            events: Vec::new(),
        }
    }

    /// Record a spike for `neuron` in time bin `timestep`.
    /// Out-of-range coordinates are silently ignored.
    pub fn add_spike(&mut self, neuron: u32, timestep: u32) {
        if neuron >= self.num_neurons || timestep >= self.num_timesteps {
            return;
        }
        self.events.push(SpikeEvent::new(neuron, timestep as f32));
    }

    /// Fraction of (neuron, timestep) entries that are silent.
    /// An empty (zero-sized) matrix counts as fully sparse (1.0).
    pub fn sparsity(&self) -> f32 {
        let total = self.num_neurons as f64 * self.num_timesteps as f64;
        if total == 0.0 {
            return 1.0;
        }
        1.0 - (self.events.len() as f64 / total) as f32
    }

    /// Indices of the neurons that spiked in time bin `timestep`.
    pub fn spikes_at(&self, timestep: u32) -> SmallVec<[u32; 8]> {
        self.events
            .iter()
            .filter_map(|e| (e.time as u32 == timestep).then_some(e.source))
            .collect()
    }

    /// Total number of stored spike events.
    pub fn spike_count(&self) -> usize {
        self.events.len()
    }
}
/// Encoder for converting analog values to spike trains.
///
/// Stateless namespace: all encoders are associated functions.
pub struct SpikeEncoder;
impl SpikeEncoder {
    /// Rate coding: Convert value to spike probability per timestep.
    ///
    /// Higher values produce more frequent spikes.
    /// Returns sparse bit vector of spike times.
    ///
    /// `duration_ms` and `dt` are in milliseconds; `max_rate_hz` is the rate
    /// a value of 1.0 maps to. NOTE: uses `thread_rng`, so output is
    /// stochastic and not reproducible across calls.
    pub fn rate_encode(value: f32, duration_ms: f32, dt: f32, max_rate_hz: f32) -> BitVec {
        let num_steps = (duration_ms / dt) as usize;
        let mut spikes = bitvec![0; num_steps];
        // Clamp value to [0, 1]
        let normalized = value.clamp(0.0, 1.0);
        // Convert to spike probability per timestep
        let prob_per_step = normalized * max_rate_hz * dt / 1000.0;
        // Generate spikes stochastically
        use rand::Rng;
        let mut rng = rand::thread_rng();
        for i in 0..num_steps {
            if rng.gen::<f32>() < prob_per_step {
                spikes.set(i, true);
            }
        }
        spikes
    }
    /// Temporal coding: First spike time encodes value.
    ///
    /// Higher values spike earlier: latency = (1 - value) * max_latency_ms
    /// (inverse/latency coding). Inverse of [`SpikeDecoder::temporal_decode`].
    pub fn temporal_encode(value: f32, max_latency_ms: f32) -> f32 {
        let normalized = value.clamp(0.0, 1.0);
        // Invert: high value = early spike
        (1.0 - normalized) * max_latency_ms
    }
    /// Delta modulation: Spike on significant change.
    ///
    /// Returns (+1, 0, -1) for increase, no change, decrease.
    /// Changes with |delta| <= threshold are treated as "no change".
    pub fn delta_encode(current: f32, previous: f32, threshold: f32) -> i8 {
        let delta = current - previous;
        if delta > threshold {
            1 // Positive spike
        } else if delta < -threshold {
            -1 // Negative spike
        } else {
            0 // No spike
        }
    }
    /// Population coding: Distribute value across multiple neurons.
    ///
    /// Returns spike pattern across `num_neurons` with Gaussian tuning curves.
    /// Tuning-curve centers are spaced evenly over [0, 1]; with a single
    /// neuron the only center sits at 0.
    pub fn population_encode(value: f32, num_neurons: usize, sigma: f32) -> Vec<f32> {
        let mut activities = vec![0.0; num_neurons];
        // Evenly spaced centers; .max(1) avoids dividing by zero for n == 1
        let centers: Vec<f32> = (0..num_neurons)
            .map(|i| i as f32 / (num_neurons - 1).max(1) as f32)
            .collect();
        for (i, &center) in centers.iter().enumerate() {
            let diff = value - center;
            // Gaussian response centered on each neuron's preferred value
            activities[i] = (-diff * diff / (2.0 * sigma * sigma)).exp();
        }
        activities
    }
    /// Convert image patch to spike-based representation.
    ///
    /// Uses difference-of-Gaussians for edge detection,
    /// then temporal coding for spike generation.
    ///
    /// NOTE(review): the current implementation is intensity-only (no DoG
    /// stage yet); it uses a fixed window of 100 time bins and a fixed
    /// firing threshold of 0.1. `height` is only used to bound the patch
    /// length (`width * height`).
    pub fn encode_image_patch(
        patch: &[f32],
        width: usize,
        height: usize,
    ) -> SparseSpikes {
        let mut spikes = SparseSpikes::new((width * height) as u32, 100);
        // Simple intensity-based encoding
        for (i, &pixel) in patch.iter().enumerate() {
            if i >= width * height {
                break;
            }
            // Higher intensity = earlier spike (bin 0..=99)
            let spike_time = ((1.0 - pixel.clamp(0.0, 1.0)) * 99.0) as u32;
            if pixel > 0.1 {
                // Threshold
                spikes.add_spike(i as u32, spike_time);
            }
        }
        spikes
    }
}
/// Decoder for converting spike trains back to analog values.
///
/// Stateless namespace: all decoders are associated functions.
pub struct SpikeDecoder;
impl SpikeDecoder {
    /// Decode rate-coded spikes to a normalized value in [0, 1].
    ///
    /// `dt` is the timestep in ms; `max_rate_hz` is the rate that maps to 1.0.
    /// Returns 0.0 ("no activity") for an empty spike train or non-positive
    /// `dt`/`max_rate_hz` — previously 0/0 produced NaN, and `clamp`
    /// propagates NaN instead of bounding it.
    pub fn rate_decode(spikes: &BitVec, dt: f32, max_rate_hz: f32) -> f32 {
        let duration_ms = spikes.len() as f32 * dt;
        if duration_ms <= 0.0 || max_rate_hz <= 0.0 {
            return 0.0;
        }
        let spike_count = spikes.count_ones();
        let rate_hz = spike_count as f32 / duration_ms * 1000.0;
        (rate_hz / max_rate_hz).clamp(0.0, 1.0)
    }
    /// Decode temporally-coded spike time to value.
    ///
    /// Earlier spikes decode to higher values (inverse of
    /// `SpikeEncoder::temporal_encode`). A non-positive `max_latency_ms`
    /// would divide by zero and yield NaN, so it decodes to 0.0.
    pub fn temporal_decode(spike_time: f32, max_latency_ms: f32) -> f32 {
        if max_latency_ms <= 0.0 {
            return 0.0;
        }
        1.0 - (spike_time / max_latency_ms).clamp(0.0, 1.0)
    }
    /// Decode population activity to value using center-of-mass.
    ///
    /// Centers are assumed evenly spaced over [0, 1], matching
    /// `SpikeEncoder::population_encode`. With no neurons or no activity the
    /// midpoint 0.5 is returned as a neutral estimate.
    pub fn population_decode(activities: &[f32]) -> f32 {
        let num_neurons = activities.len();
        if num_neurons == 0 {
            return 0.5;
        }
        let mut weighted_sum = 0.0;
        let mut total_weight = 0.0;
        for (i, &activity) in activities.iter().enumerate() {
            // .max(1) keeps the single-neuron case from dividing by zero
            let center = i as f32 / (num_neurons - 1).max(1) as f32;
            weighted_sum += center * activity;
            total_weight += activity;
        }
        if total_weight > 0.0 {
            weighted_sum / total_weight
        } else {
            0.5
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Constructor fills payload with 0 and stores source/time verbatim.
    #[test]
    fn test_spike_event_creation() {
        let event = SpikeEvent::new(42, 10.5);
        assert_eq!(event.source, 42);
        assert_eq!(event.time, 10.5);
        assert_eq!(event.payload, 0);
    }
    // 3 spikes over 100 ms => 30 Hz; equal 20 ms gaps between spikes.
    #[test]
    fn test_spike_train() {
        let mut train = SpikeTrain::new(0);
        train.add_spike(10.0);
        train.add_spike(30.0);
        train.add_spike(50.0);
        assert_eq!(train.spike_count(), 3);
        assert!((train.firing_rate(100.0) - 30.0).abs() < 0.1);
        let isis = train.isis();
        assert_eq!(isis, vec![20.0, 20.0]);
    }
    // 3 spikes in a 100x1000 matrix => sparsity well above 99%.
    #[test]
    fn test_sparse_spikes() {
        let mut spikes = SparseSpikes::new(100, 1000);
        spikes.add_spike(0, 10);
        spikes.add_spike(50, 10);
        spikes.add_spike(99, 500);
        assert_eq!(spikes.spike_count(), 3);
        assert!(spikes.sparsity() > 0.99); // Very sparse
        let at_10 = spikes.spikes_at(10);
        assert_eq!(at_10.len(), 2);
    }
    // Rate coding is stochastic, so only a loose ordering is asserted.
    #[test]
    fn test_rate_encoding() {
        // High value should produce more spikes
        let high_spikes = SpikeEncoder::rate_encode(0.9, 100.0, 1.0, 100.0);
        let low_spikes = SpikeEncoder::rate_encode(0.1, 100.0, 1.0, 100.0);
        // Statistical test - not deterministic
        assert!(high_spikes.count_ones() > low_spikes.count_ones() / 2);
    }
    // Latency coding: larger value => shorter latency.
    #[test]
    fn test_temporal_encoding() {
        let early = SpikeEncoder::temporal_encode(0.9, 100.0);
        let late = SpikeEncoder::temporal_encode(0.1, 100.0);
        assert!(early < late); // High value = early spike
    }
    // Sign of the change relative to the threshold band.
    #[test]
    fn test_delta_encoding() {
        assert_eq!(SpikeEncoder::delta_encode(1.0, 0.0, 0.5), 1);
        assert_eq!(SpikeEncoder::delta_encode(0.0, 1.0, 0.5), -1);
        assert_eq!(SpikeEncoder::delta_encode(0.5, 0.5, 0.5), 0);
    }
    // Gaussian tuning curves peak at the neuron whose center matches 0.5.
    #[test]
    fn test_population_encoding() {
        let activities = SpikeEncoder::population_encode(0.5, 10, 0.2);
        assert_eq!(activities.len(), 10);
        // Middle neuron should have highest activity
        let max_idx = activities
            .iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
            .map(|(i, _)| i)
            .unwrap();
        assert_eq!(max_idx, 4); // Close to middle
    }
    // 10 spikes / 100 ms at the 100 Hz ceiling decodes to ~1.0.
    #[test]
    fn test_rate_decode() {
        let mut spikes = bitvec![0; 100];
        // 10 spikes in 100 timesteps at dt=1ms = 100 Hz
        for i in (0..100).step_by(10) {
            spikes.set(i, true);
        }
        let decoded = SpikeDecoder::rate_decode(&spikes, 1.0, 100.0);
        assert!((decoded - 1.0).abs() < 0.1);
    }
    // Symmetric activity around the middle decodes to ~0.5.
    #[test]
    fn test_population_decode() {
        // Peak at middle
        let activities = vec![0.0, 0.1, 0.5, 1.0, 0.5, 0.1, 0.0];
        let decoded = SpikeDecoder::population_decode(&activities);
        assert!((decoded - 0.5).abs() < 0.1);
    }
}

View File

@@ -0,0 +1,46 @@
//! Error types for the spiking neural network library.
use thiserror::Error;
/// Result type alias for spiking network operations.
///
/// Shadowing `std::result::Result` with the crate error type is the usual
/// pattern; import as `use spiking_network::error::Result`.
pub type Result<T> = std::result::Result<T, SpikingError>;
/// Errors that can occur in spiking neural network operations.
///
/// Every variant carries a human-readable detail string; `IoError` wraps the
/// underlying `std::io::Error` via `#[from]`, so `?` converts automatically.
#[derive(Error, Debug)]
pub enum SpikingError {
    /// Invalid neuron parameters
    #[error("Invalid neuron parameters: {0}")]
    InvalidParams(String),
    /// Network topology error
    #[error("Network topology error: {0}")]
    TopologyError(String),
    /// Spike encoding error
    #[error("Spike encoding error: {0}")]
    EncodingError(String),
    /// Router error
    #[error("Router error: {0}")]
    RouterError(String),
    /// Learning error
    #[error("Learning error: {0}")]
    LearningError(String),
    /// Resource exhaustion (e.g. a neuron's synapse budget was reached)
    #[error("Resource exhaustion: {0}")]
    ResourceExhausted(String),
    /// Invalid operation
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),
    /// IO error wrapper
    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),
    /// Serialization error
    #[error("Serialization error: {0}")]
    SerializationError(String),
}

View File

@@ -0,0 +1,71 @@
//! # Spiking Neural Network Library
//!
//! Event-driven spiking neural network implementation optimized for ASIC deployment.
//!
//! ## Philosophy
//!
//! Spiking neural networks do not compute in the traditional sense. They fire only when
//! something meaningful happens. Everything is event-driven. This single shift changes
//! the entire energy and timing model of your ASIC.
//!
//! A conventional network evaluates every neuron every cycle. It burns power on
//! multiplications even when nothing is changing. A spiking model skips all of that.
//! Neurons stay silent until a threshold is crossed. You only compute on change.
//!
//! ## Architecture Benefits
//!
//! - **Sparse computation**: Only active neurons consume resources
//! - **Event-driven**: No wasted cycles on unchanged state
//! - **Local connectivity**: Minimizes routing complexity
//! - **Tiny events**: Each spike is just a few bits
//! - **Microsecond latency**: Local lookups instead of matrix multiplies
//!
//! ## Usage
//!
//! ```rust,ignore
//! use spiking_network::{
//!     network::SpikingNetwork,
//!     encoding::SparseSpikes,
//! };
//!
//! // Create a network with 1000 neurons (default config otherwise)
//! let mut network = SpikingNetwork::with_neurons(1000)?;
//!
//! // Encode input as sparse spikes and inject it
//! let mut input = SparseSpikes::new(1000, 1);
//! input.add_spike(0, 0);
//! network.inject_spikes(&input);
//!
//! // Process - only fires on meaningful events
//! let stats = network.run(100.0);
//! ```
// Lint policy: public items must be documented; unsafe ops inside unsafe fns
// still require explicit unsafe blocks.
#![warn(missing_docs)]
#![deny(unsafe_op_in_unsafe_fn)]
pub mod encoding;
pub mod error;
pub mod learning;
pub mod network;
pub mod neuron;
pub mod router;
// Re-exports for convenience
pub use encoding::{SpikeEncoder, SpikeEvent, SpikeTrain};
pub use error::{Result, SpikingError};
pub use learning::{STDPConfig, STDPLearning};
pub use network::{NetworkConfig, NetworkStats, SpikingNetwork};
pub use neuron::{IzhikevichNeuron, LIFNeuron, NeuronParams, SpikingNeuron};
pub use router::{AsicRouter, RouterConfig, SpikePacket};
/// Library version (taken from Cargo.toml at compile time)
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
#[cfg(test)]
mod tests {
    use super::*;
    // Sanity check that the compile-time version string is populated.
    #[test]
    fn test_version() {
        assert!(!VERSION.is_empty());
    }
}

View File

@@ -0,0 +1,544 @@
//! Spiking neural network implementation.
//!
//! This module provides the core network structure with event-driven processing.
//!
//! ## Event-Driven Architecture
//!
//! Unlike conventional ANNs that evaluate every neuron every cycle, this network:
//! - Processes only when spikes arrive
//! - Skips silent neurons entirely
//! - Routes tiny spike events (few bits each)
//! - Maintains microsecond-scale latency
//!
//! ## Network Topologies
//!
//! Supports ASIC-friendly connectivity patterns:
//! - Local 2D grids (minimal routing)
//! - Small-world networks (efficient paths)
//! - Hierarchical layers (feedforward)
//! - Custom sparse connectivity
mod synapse;
mod topology;
pub use synapse::{Synapse, SynapseType};
pub use topology::{ConnectionPattern, LocalConnectivity, TopologyConfig};
use crate::encoding::{SparseSpikes, SpikeEvent};
use crate::error::{Result, SpikingError};
use crate::neuron::{LIFNeuron, SpikingNeuron};
use indexmap::IndexMap;
use parking_lot::RwLock;
use priority_queue::PriorityQueue;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use smallvec::SmallVec;
use std::cmp::Reverse;
use std::sync::Arc;
/// Network configuration.
///
/// Passed by value to [`SpikingNetwork::new`]; see `Default` for the
/// baseline values.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkConfig {
    /// Number of neurons
    pub num_neurons: usize,
    /// Simulation timestep (ms)
    pub dt: f32,
    /// Enable parallel processing (rayon) for the per-step neuron updates
    pub parallel: bool,
    /// Maximum synapses per neuron (for ASIC budgeting)
    pub max_synapses_per_neuron: usize,
    /// Topology configuration
    pub topology: TopologyConfig,
}
impl Default for NetworkConfig {
    /// Baseline: 1000 neurons, 1 ms timestep, parallel updates enabled,
    /// a 100-synapse fan-out budget per neuron, and the default topology.
    fn default() -> Self {
        Self {
            topology: TopologyConfig::default(),
            max_synapses_per_neuron: 100,
            parallel: true,
            dt: 1.0,
            num_neurons: 1000,
        }
    }
}
/// Statistics from network simulation.
///
/// Populated incrementally by `SpikingNetwork::step`; the derived summary
/// fields are filled in by [`NetworkStats::finalize`].
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct NetworkStats {
    /// Total spikes generated (computed in `finalize`)
    pub total_spikes: usize,
    /// Spikes per timestep
    pub spikes_per_step: Vec<usize>,
    /// Active neuron count per timestep
    pub active_neurons_per_step: Vec<usize>,
    /// Energy consumed (picojoules)
    pub energy_consumed: f64,
    /// Simulation time (ms)
    pub simulation_time: f32,
    /// Average firing rate (Hz), per neuron (computed in `finalize`)
    pub avg_firing_rate: f32,
    /// Network sparsity (fraction of silent neuron-timesteps, computed in `finalize`)
    pub sparsity: f32,
}
impl NetworkStats {
    /// Derive the summary statistics once a simulation has finished.
    ///
    /// No-op when no timesteps were recorded. Fills in `total_spikes`,
    /// `avg_firing_rate` (Hz, per neuron), and `sparsity` (fraction of
    /// silent neuron-timesteps).
    pub fn finalize(&mut self) {
        if self.spikes_per_step.is_empty() {
            return;
        }
        self.total_spikes = self.spikes_per_step.iter().sum();
        // Population size recovered from the per-step log; 1 when nothing
        // was logged so the rate division below stays well-defined.
        let num_neurons = self
            .active_neurons_per_step
            .iter()
            .max()
            .copied()
            .unwrap_or(1);
        // Average firing rate: spikes / neuron / ms, scaled to Hz.
        if self.simulation_time > 0.0 && num_neurons > 0 {
            self.avg_firing_rate =
                (self.total_spikes as f32) / (num_neurons as f32) / self.simulation_time * 1000.0;
        }
        // Sparsity: 1 - spikes / (total neuron-timestep slots).
        let total_possible: usize = self.active_neurons_per_step.iter().sum();
        if total_possible > 0 {
            self.sparsity = 1.0 - (self.total_spikes as f32 / total_possible as f32);
        }
    }
}
/// Event in the priority queue for event-driven simulation.
///
/// `PriorityQueue` is keyed by the item itself: pushing an event that
/// compares equal to one already queued only updates its priority instead of
/// enqueueing a second delivery. Equality therefore has to cover *all*
/// fields — the previous implementation ignored `current`, so two spikes
/// arriving at the same target at the same time with different currents
/// collapsed into one and the second current was silently dropped.
/// (Bitwise-identical duplicates still merge; disambiguating those would
/// need a sequence number.)
#[derive(Debug, Clone)]
struct NetworkEvent {
    /// Target neuron index
    target: usize,
    /// Event time
    time: f32,
    /// Input current to deliver
    current: f32,
}
impl PartialEq for NetworkEvent {
    /// Field-wise equality, comparing the floats by bit pattern so that
    /// `Eq` stays reflexive even for NaN and so equality agrees exactly
    /// with `Hash` (plain `f32 ==` treats -0.0 == 0.0 while their bit
    /// patterns — and thus hashes — differ, violating the Hash contract).
    fn eq(&self, other: &Self) -> bool {
        self.target == other.target
            && self.time.to_bits() == other.time.to_bits()
            && self.current.to_bits() == other.current.to_bits()
    }
}
impl Eq for NetworkEvent {}
impl std::hash::Hash for NetworkEvent {
    /// Hashes exactly the fields `eq` compares, in the same representation.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.target.hash(state);
        self.time.to_bits().hash(state);
        self.current.to_bits().hash(state);
    }
}
/// Spiking neural network with event-driven processing.
///
/// Spike deliveries are scheduled through a priority queue ordered by
/// arrival time; neurons are only integrated once per `step()` and silent
/// neurons generate no downstream work.
pub struct SpikingNetwork {
    /// Network configuration
    config: NetworkConfig,
    /// Neurons (using LIF as default)
    neurons: Vec<LIFNeuron>,
    /// Outgoing connections: source -> [(target, synapse)]
    connections: Vec<SmallVec<[(usize, Synapse); 16]>>,
    /// Event queue for spike scheduling; priority is the (negated,
    /// quantized) arrival time so earlier events pop first
    event_queue: PriorityQueue<NetworkEvent, Reverse<i64>>,
    /// Current simulation time (ms)
    current_time: f32,
    /// Statistics collector
    stats: NetworkStats,
    /// Output spikes (for external readout); behind a lock so readers can
    /// share the buffer via the `Arc`
    output_spikes: Arc<RwLock<Vec<SpikeEvent>>>,
}
impl SpikingNetwork {
    /// Create a new spiking network.
    ///
    /// Allocates `config.num_neurons` default LIF neurons and empty
    /// per-neuron connection lists.
    ///
    /// # Errors
    /// Returns `SpikingError::InvalidParams` when `num_neurons == 0`.
    pub fn new(config: NetworkConfig) -> Result<Self> {
        if config.num_neurons == 0 {
            return Err(SpikingError::InvalidParams("num_neurons must be > 0".into()));
        }
        // Initialize neurons
        let neurons: Vec<LIFNeuron> = (0..config.num_neurons)
            .map(|_| LIFNeuron::with_defaults())
            .collect();
        // Initialize connection storage
        let connections = vec![SmallVec::new(); config.num_neurons];
        Ok(Self {
            config,
            neurons,
            connections,
            event_queue: PriorityQueue::new(),
            current_time: 0.0,
            stats: NetworkStats::default(),
            output_spikes: Arc::new(RwLock::new(Vec::new())),
        })
    }
    /// Create network with given number of neurons and default config.
    pub fn with_neurons(num_neurons: usize) -> Result<Self> {
        Self::new(NetworkConfig {
            num_neurons,
            ..Default::default()
        })
    }
    /// Add a synaptic connection.
    ///
    /// # Errors
    /// - `TopologyError` when either index is out of range.
    /// - `ResourceExhausted` when `source` already has
    ///   `max_synapses_per_neuron` outgoing synapses (ASIC fan-out budget).
    pub fn connect(&mut self, source: usize, target: usize, synapse: Synapse) -> Result<()> {
        if source >= self.config.num_neurons || target >= self.config.num_neurons {
            return Err(SpikingError::TopologyError("Invalid neuron indices".into()));
        }
        if self.connections[source].len() >= self.config.max_synapses_per_neuron {
            return Err(SpikingError::ResourceExhausted(format!(
                "Max synapses ({}) reached for neuron {}",
                self.config.max_synapses_per_neuron, source
            )));
        }
        self.connections[source].push((target, synapse));
        Ok(())
    }
    /// Build network topology from configuration.
    ///
    /// Dispatches to the builder matching `config.topology.pattern`;
    /// `Custom` leaves the network untouched for manual `connect` calls.
    pub fn build_topology(&mut self) -> Result<()> {
        let pattern = self.config.topology.pattern.clone();
        // NOTE(review): this binding is never read below (the builders read
        // `self.config` directly) — candidate for removal.
        let num_neurons = self.config.num_neurons;
        match pattern {
            ConnectionPattern::AllToAll { probability } => {
                self.build_random_connections(probability)?;
            }
            ConnectionPattern::LocalGrid { width, radius } => {
                self.build_local_grid(width, radius)?;
            }
            ConnectionPattern::SmallWorld {
                k,
                rewire_prob,
            } => {
                self.build_small_world(k, rewire_prob)?;
            }
            ConnectionPattern::Feedforward { layer_sizes } => {
                self.build_feedforward(&layer_sizes)?;
            }
            ConnectionPattern::Custom => {
                // Connections added manually
            }
        }
        Ok(())
    }
    /// Erdős–Rényi-style wiring: each ordered (src, tgt) pair is connected
    /// with `probability`, excitatory, with a random weight in [0.1, 1.0).
    /// Budget overruns from `connect` are deliberately ignored (`let _`).
    fn build_random_connections(&mut self, probability: f32) -> Result<()> {
        use rand::Rng;
        let mut rng = rand::thread_rng();
        let n = self.config.num_neurons;
        for src in 0..n {
            for tgt in 0..n {
                if src != tgt && rng.gen::<f32>() < probability {
                    let weight = rng.gen_range(0.1..1.0);
                    let synapse = Synapse::excitatory(weight);
                    let _ = self.connect(src, tgt, synapse);
                }
            }
        }
        Ok(())
    }
    /// 2D grid with wrap-around (toroidal) neighborhoods of the given
    /// `radius`; weight falls off with 1/distance, jittered by [0.5, 1.0).
    /// NOTE(review): `height = num_neurons / width` truncates, so trailing
    /// neurons of a non-divisible population are left unconnected.
    fn build_local_grid(&mut self, width: usize, radius: usize) -> Result<()> {
        use rand::Rng;
        let mut rng = rand::thread_rng();
        let height = self.config.num_neurons / width;
        for y in 0..height {
            for x in 0..width {
                let src = y * width + x;
                // Connect to neighbors within radius
                for dy in -(radius as i32)..=(radius as i32) {
                    for dx in -(radius as i32)..=(radius as i32) {
                        if dx == 0 && dy == 0 {
                            continue;
                        }
                        // rem_euclid wraps negative offsets around the torus
                        let nx = (x as i32 + dx).rem_euclid(width as i32) as usize;
                        let ny = (y as i32 + dy).rem_euclid(height as i32) as usize;
                        let tgt = ny * width + nx;
                        if tgt < self.config.num_neurons {
                            let distance = ((dx * dx + dy * dy) as f32).sqrt();
                            let weight = 1.0 / distance * rng.gen_range(0.5..1.0);
                            let synapse = Synapse::excitatory(weight);
                            let _ = self.connect(src, tgt, synapse);
                        }
                    }
                }
            }
        }
        Ok(())
    }
    /// Watts–Strogatz-style wiring: ring lattice with `k/2` forward
    /// neighbors per neuron, then one extra long-range edge is ADDED per
    /// rewired neuron (the original lattice edge is kept, not removed).
    fn build_small_world(&mut self, k: usize, rewire_prob: f32) -> Result<()> {
        use rand::Rng;
        let mut rng = rand::thread_rng();
        let n = self.config.num_neurons;
        // Start with ring lattice
        for i in 0..n {
            for j in 1..=k / 2 {
                let neighbor = (i + j) % n;
                let weight = rng.gen_range(0.5..1.0);
                let synapse = Synapse::excitatory(weight);
                let _ = self.connect(i, neighbor, synapse);
            }
        }
        // Rewire with probability
        for i in 0..n {
            if rng.gen::<f32>() < rewire_prob {
                let new_target = rng.gen_range(0..n);
                if new_target != i {
                    let weight = rng.gen_range(0.5..1.0);
                    let synapse = Synapse::excitatory(weight);
                    let _ = self.connect(i, new_target, synapse);
                }
            }
        }
        Ok(())
    }
    /// Dense layer-to-layer wiring over consecutive index ranges; neuron
    /// indices are assigned to layers in order. Connections past the
    /// synapse budget (or past `num_neurons`) are silently dropped by
    /// `connect`'s ignored error.
    fn build_feedforward(&mut self, layer_sizes: &[usize]) -> Result<()> {
        use rand::Rng;
        let mut rng = rand::thread_rng();
        let mut offset = 0;
        for i in 0..layer_sizes.len() - 1 {
            let src_size = layer_sizes[i];
            let tgt_size = layer_sizes[i + 1];
            let tgt_offset = offset + src_size;
            for src in 0..src_size {
                for tgt in 0..tgt_size {
                    let weight = rng.gen_range(0.1..1.0);
                    let synapse = Synapse::excitatory(weight);
                    let _ = self.connect(offset + src, tgt_offset + tgt, synapse);
                }
            }
            offset = tgt_offset;
        }
        Ok(())
    }
    /// Inject external input spikes.
    ///
    /// Each event is scheduled as a unit-current delivery to its source
    /// neuron at the event's time; out-of-range neurons are skipped.
    pub fn inject_spikes(&mut self, spikes: &SparseSpikes) {
        for event in &spikes.events {
            if (event.source as usize) < self.config.num_neurons {
                self.schedule_event(
                    event.source as usize,
                    event.time,
                    1.0, // Unit current for input spikes
                );
            }
        }
    }
    /// Schedule an event for future processing.
    ///
    /// Priority is the arrival time quantized to integer units of 1/1000 ms
    /// (truncated), reversed so earlier times pop first. NOTE(review):
    /// `PriorityQueue` is keyed by the event value — pushing an event that
    /// compares equal to one already queued updates its priority instead of
    /// enqueueing a second delivery; confirm coincident spikes to the same
    /// target cannot be lost.
    fn schedule_event(&mut self, target: usize, time: f32, current: f32) {
        let event = NetworkEvent {
            target,
            time,
            current,
        };
        // Priority is negative time (earlier = higher priority)
        let priority = Reverse((time * 1000.0) as i64);
        self.event_queue.push(event, priority);
    }
    /// Process one timestep using event-driven simulation.
    ///
    /// 1. Drain all queued deliveries with `time <= current_time + dt` into
    ///    the target neurons' input accumulators.
    /// 2. Integrate every neuron once with `dt` (rayon-parallel when
    ///    `config.parallel`), collecting the indices that fired.
    /// 3. Record fired spikes to the output buffer and schedule their
    ///    postsynaptic deliveries at `current_time + synapse.delay`.
    ///
    /// Returns the number of spikes fired this step.
    pub fn step(&mut self) -> usize {
        let dt = self.config.dt;
        let next_time = self.current_time + dt;
        let mut spikes_this_step = 0;
        // Process events up to next_time
        while let Some((event, _)) = self.event_queue.peek() {
            if event.time > next_time {
                break;
            }
            let event = self.event_queue.pop().unwrap().0;
            self.neurons[event.target].receive_input(event.current);
        }
        // Update all neurons and collect spikes
        let spike_indices: Vec<usize> = if self.config.parallel {
            self.neurons
                .par_iter_mut()
                .enumerate()
                .filter_map(|(i, neuron)| {
                    if neuron.update(dt) {
                        Some(i)
                    } else {
                        None
                    }
                })
                .collect()
        } else {
            // Identical to the parallel branch, just sequential
            self.neurons
                .iter_mut()
                .enumerate()
                .filter_map(|(i, neuron)| {
                    if neuron.update(dt) {
                        Some(i)
                    } else {
                        None
                    }
                })
                .collect()
        };
        // Propagate spikes
        for &src in &spike_indices {
            spikes_this_step += 1;
            // Record output spike (lock held only for the push)
            {
                let mut outputs = self.output_spikes.write();
                outputs.push(SpikeEvent::new(src as u32, self.current_time));
            }
            // Schedule postsynaptic events
            for &(target, ref synapse) in &self.connections[src] {
                let arrival_time = self.current_time + synapse.delay;
                let current = synapse.weight * synapse.sign();
                self.schedule_event(target, arrival_time, current);
            }
        }
        // Update statistics.
        // NOTE(review): this pushes the TOTAL population size, not the
        // count of active neurons; `NetworkStats::finalize` relies on it as
        // the per-step denominator, but the field name is misleading.
        self.stats.spikes_per_step.push(spikes_this_step);
        self.stats.active_neurons_per_step.push(self.config.num_neurons);
        self.stats.energy_consumed += self.estimate_step_energy(spikes_this_step);
        self.current_time = next_time;
        spikes_this_step
    }
    /// Run simulation for given duration.
    ///
    /// Steps `floor(duration_ms / dt)` times; `simulation_time` is recorded
    /// as the requested duration even when the division truncates.
    pub fn run(&mut self, duration_ms: f32) -> NetworkStats {
        let num_steps = (duration_ms / self.config.dt) as usize;
        for _ in 0..num_steps {
            self.step();
        }
        self.stats.simulation_time = duration_ms;
        self.stats.finalize();
        self.stats.clone()
    }
    /// Get a snapshot (clone) of the output spikes recorded so far.
    pub fn output_spikes(&self) -> Vec<SpikeEvent> {
        self.output_spikes.read().clone()
    }
    /// Clear output spike buffer.
    pub fn clear_outputs(&mut self) {
        self.output_spikes.write().clear();
    }
    /// Reset network to initial state.
    ///
    /// Resets every neuron, drops pending events and statistics, rewinds
    /// the clock, and clears the output buffer. Connections are kept.
    pub fn reset(&mut self) {
        for neuron in &mut self.neurons {
            neuron.reset();
        }
        self.event_queue.clear();
        self.current_time = 0.0;
        self.stats = NetworkStats::default();
        self.output_spikes.write().clear();
    }
    /// Get current time.
    pub fn current_time(&self) -> f32 {
        self.current_time
    }
    /// Get number of neurons.
    pub fn num_neurons(&self) -> usize {
        self.config.num_neurons
    }
    /// Get total number of synapses.
    pub fn num_synapses(&self) -> usize {
        self.connections.iter().map(|c| c.len()).sum()
    }
    /// Estimate energy for one step.
    ///
    /// Simple linear model: a fixed per-neuron update cost plus a
    /// per-spike cost, both in picojoules.
    fn estimate_step_energy(&self, num_spikes: usize) -> f64 {
        // Base energy for updates
        let update_energy = self.config.num_neurons as f64 * 3.0; // pJ per neuron
        // Spike energy
        let spike_energy = num_spikes as f64 * 10.0; // pJ per spike
        update_energy + spike_energy
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Construction respects the requested population size.
    #[test]
    fn test_network_creation() {
        let network = SpikingNetwork::with_neurons(100).unwrap();
        assert_eq!(network.num_neurons(), 100);
    }
    // A single connect() is reflected in the synapse count.
    #[test]
    fn test_basic_connection() {
        let mut network = SpikingNetwork::with_neurons(10).unwrap();
        let synapse = Synapse::excitatory(0.5);
        network.connect(0, 1, synapse).unwrap();
        assert_eq!(network.num_synapses(), 1);
    }
    // An injected input spike should produce at least one output spike
    // somewhere along the chain within 100 ms.
    #[test]
    fn test_simulation_step() {
        let mut network = SpikingNetwork::with_neurons(10).unwrap();
        // Add some connections
        for i in 0..9 {
            network.connect(i, i + 1, Synapse::excitatory(0.5)).unwrap();
        }
        // Inject strong input to first neuron
        let mut spikes = SparseSpikes::new(10, 1);
        spikes.add_spike(0, 0);
        network.inject_spikes(&spikes);
        // Run a few steps
        let stats = network.run(100.0);
        assert!(stats.total_spikes > 0);
    }
    // With no external input the network should stay almost silent.
    #[test]
    fn test_sparsity_tracking() {
        let mut network = SpikingNetwork::with_neurons(100).unwrap();
        network.build_topology().unwrap();
        let stats = network.run(100.0);
        // Without input, should be very sparse
        assert!(stats.sparsity > 0.9);
    }
}

View File

@@ -0,0 +1,413 @@
//! Izhikevich neuron model.
//!
//! The Izhikevich model captures rich spiking dynamics with just two variables:
//! - Membrane potential (fast)
//! - Recovery variable (slow)
//!
//! This allows simulation of 20+ different firing patterns observed in cortical neurons,
//! while remaining computationally efficient.
//!
//! ## Firing Patterns
//!
//! - Regular spiking (RS) - most common excitatory
//! - Intrinsically bursting (IB) - burst then regular
//! - Chattering (CH) - fast rhythmic bursting
//! - Fast spiking (FS) - inhibitory interneurons
//! - Low-threshold spiking (LTS) - inhibitory
//!
//! ## ASIC Considerations
//!
//! - 2 multiply-accumulates per timestep
//! - 1 multiplication for recovery
//! - ~150-200 gates in digital implementation
use super::{NeuronParams, NeuronState, SpikingNeuron, EnergyModel};
use serde::{Deserialize, Serialize};
/// Pre-defined Izhikevich neuron types with biological parameters.
///
/// Each variant maps to a canonical `(a, b, c, d)` parameter set via
/// [`IzhikevichType::params`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum IzhikevichType {
    /// Regular spiking - most common excitatory cortical neuron
    RegularSpiking,
    /// Intrinsically bursting - initial burst then regular spikes
    IntrinsicallyBursting,
    /// Chattering - fast rhythmic bursting
    Chattering,
    /// Fast spiking - typical inhibitory interneuron
    FastSpiking,
    /// Low-threshold spiking - inhibitory with rebound
    LowThresholdSpiking,
    /// Thalamo-cortical - two firing modes
    ThalamoCortical,
    /// Resonator - subthreshold oscillations
    Resonator,
}
impl IzhikevichType {
    /// Get parameters for this neuron type.
    ///
    /// Each variant maps to its canonical `(a, b, c, d)` quadruple; all
    /// profiles share the standard 30 mV spike cutoff and rely on the
    /// post-spike reset dynamics instead of an explicit refractory period.
    pub fn params(self) -> IzhikevichParams {
        let (a, b, c, d) = match self {
            Self::RegularSpiking => (0.02, 0.2, -65.0, 8.0),
            Self::IntrinsicallyBursting => (0.02, 0.2, -55.0, 4.0),
            Self::Chattering => (0.02, 0.2, -50.0, 2.0),
            Self::FastSpiking => (0.1, 0.2, -65.0, 2.0),
            Self::LowThresholdSpiking => (0.02, 0.25, -65.0, 2.0),
            Self::ThalamoCortical => (0.02, 0.25, -65.0, 0.05),
            Self::Resonator => (0.1, 0.26, -65.0, 2.0),
        };
        IzhikevichParams {
            a,
            b,
            c,
            d,
            threshold: 30.0,  // standard spike cutoff (mV)
            refractory: 0.0,  // implicit in the reset dynamics
        }
    }
}
/// Parameters for Izhikevich neuron model.
///
/// The model equations are:
/// ```text
/// dv/dt = 0.04*v² + 5*v + 140 - u + I
/// du/dt = a*(b*v - u)
/// if v >= 30 mV: v = c, u = u + d
/// ```
///
/// Use [`IzhikevichType::params`] for the standard biological presets.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct IzhikevichParams {
    /// Time scale of recovery variable (smaller = slower recovery)
    pub a: f32,
    /// Sensitivity of recovery to subthreshold membrane potential
    pub b: f32,
    /// After-spike reset value of membrane potential (mV)
    pub c: f32,
    /// After-spike reset increment of recovery variable
    pub d: f32,
    /// Spike threshold (mV) - typically 30
    pub threshold: f32,
    /// Explicit refractory period (ms) - usually 0 for Izhikevich
    pub refractory: f32,
}
impl Default for IzhikevichParams {
    /// Defaults to the regular-spiking preset — the most common
    /// excitatory cortical profile.
    fn default() -> Self {
        IzhikevichType::RegularSpiking.params()
    }
}
impl NeuronParams for IzhikevichParams {
    fn threshold(&self) -> f32 {
        self.threshold
    }

    fn reset_potential(&self) -> f32 {
        self.c
    }

    fn resting_potential(&self) -> f32 {
        // Resting potential is approximately -65 to -70 mV
        -65.0
    }

    fn refractory_period(&self) -> f32 {
        self.refractory
    }

    /// Returns `Some(reason)` for out-of-range parameters, `None` when valid.
    fn validate(&self) -> Option<String> {
        if self.a <= 0.0 || self.a > 1.0 {
            Some("a should be in (0, 1]".into())
        } else if self.threshold < 0.0 {
            Some("threshold should be positive".into())
        } else {
            None
        }
    }
}
/// Izhikevich neuron model.
///
/// Provides rich spiking dynamics while remaining computationally efficient.
/// The two-variable model captures most qualitative behaviors of biological neurons.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IzhikevichNeuron {
    /// Model parameters
    params: IzhikevichParams,
    /// Membrane potential (mV)
    v: f32,
    /// Recovery variable (dimensionless)
    u: f32,
    /// Accumulated input current (cleared after each update)
    input_current: f32,
    /// Time since last spike; `None` until the first spike
    time_since_spike: Option<f32>,
    /// Refractory countdown (if explicit refractory used)
    refractory_remaining: f32,
}
impl IzhikevichNeuron {
    /// Build a neuron from one of the predefined biological profiles.
    pub fn from_type(neuron_type: IzhikevichType) -> Self {
        let params = neuron_type.params();
        Self::new(params)
    }

    /// Shortcut for the most common excitatory profile (regular spiking).
    pub fn regular_spiking() -> Self {
        Self::from_type(IzhikevichType::RegularSpiking)
    }

    /// Shortcut for the typical inhibitory interneuron profile (fast spiking).
    pub fn fast_spiking() -> Self {
        Self::from_type(IzhikevichType::FastSpiking)
    }

    /// Current value of the slow recovery variable `u`.
    pub fn recovery(&self) -> f32 {
        self.u
    }
}
impl SpikingNeuron for IzhikevichNeuron {
    type Params = IzhikevichParams;
    /// Construct a neuron at its post-reset state: `v = c`, `u = b * v`.
    fn new(params: IzhikevichParams) -> Self {
        let v = params.c;
        Self {
            v,
            u: params.b * v,
            params,
            input_current: 0.0,
            time_since_spike: None,
            refractory_remaining: 0.0,
        }
    }
    /// Snapshot of the externally visible state.
    fn state(&self) -> NeuronState {
        NeuronState {
            membrane_potential: self.v,
            time_since_spike: self.time_since_spike,
            is_refractory: self.refractory_remaining > 0.0,
            input_current: self.input_current,
        }
    }
    fn params(&self) -> &Self::Params {
        &self.params
    }
    /// Accumulate synaptic/external current until the next `update()`.
    fn receive_input(&mut self, current: f32) {
        self.input_current += current;
    }
    /// Advance the model by `dt` ms; returns `true` when a spike fires.
    ///
    /// The membrane potential is integrated with two Euler half-steps for
    /// numerical stability; the recovery variable takes one full step.
    fn update(&mut self, dt: f32) -> bool {
        // Keep the time-since-spike clock running.
        if let Some(elapsed) = self.time_since_spike.as_mut() {
            *elapsed += dt;
        }
        // An explicit refractory window (if configured) suppresses all
        // dynamics and discards any accumulated input.
        if self.refractory_remaining > 0.0 {
            self.refractory_remaining -= dt;
            self.input_current = 0.0;
            return false;
        }
        let drive = self.input_current;
        // Two half-steps for v; u and the drive are held constant over the step.
        let slope_a = 0.04 * self.v * self.v + 5.0 * self.v + 140.0 - self.u + drive;
        self.v += slope_a * dt * 0.5;
        let slope_b = 0.04 * self.v * self.v + 5.0 * self.v + 140.0 - self.u + drive;
        self.v += slope_b * dt * 0.5;
        // Single Euler step for the recovery variable.
        let recovery_slope = self.params.a * (self.params.b * self.v - self.u);
        self.u += recovery_slope * dt;
        // Input is consumed exactly once per timestep.
        self.input_current = 0.0;
        if self.v >= self.params.threshold {
            // Spike: reset v, bump u, restart spike timing and refractory clock.
            self.v = self.params.c;
            self.u += self.params.d;
            self.time_since_spike = Some(0.0);
            self.refractory_remaining = self.params.refractory;
            true
        } else {
            false
        }
    }
    /// Return to the freshly constructed state.
    fn reset(&mut self) {
        self.v = self.params.c;
        self.u = self.params.b * self.v;
        self.input_current = 0.0;
        self.time_since_spike = None;
        self.refractory_remaining = 0.0;
    }
    fn is_refractory(&self) -> bool {
        self.refractory_remaining > 0.0
    }
    fn membrane_potential(&self) -> f32 {
        self.v
    }
    fn time_since_spike(&self) -> Option<f32> {
        self.time_since_spike
    }
}
impl EnergyModel for IzhikevichNeuron {
    /// Per-timestep cost estimate: ~3 multiplies, 6 adds, 1 comparison —
    /// costlier than LIF due to the quadratic term and second state variable.
    fn update_energy(&self) -> f32 {
        const UPDATE_COST_PJ: f32 = 5.0;
        UPDATE_COST_PJ
    }
    /// Cost estimate for generating and routing one spike packet.
    fn spike_energy(&self) -> f32 {
        const SPIKE_COST_PJ: f32 = 10.0;
        SPIKE_COST_PJ
    }
    /// Estimated footprint: ~150-200 gates at 28nm.
    fn silicon_area(&self) -> f32 {
        const AREA_UM2: f32 = 17.5;
        AREA_UM2
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// All predefined firing-pattern presets must produce valid parameters.
    #[test]
    fn test_izhikevich_types() {
        let types = [
            IzhikevichType::RegularSpiking,
            IzhikevichType::IntrinsicallyBursting,
            IzhikevichType::Chattering,
            IzhikevichType::FastSpiking,
            IzhikevichType::LowThresholdSpiking,
            IzhikevichType::ThalamoCortical,
            IzhikevichType::Resonator,
        ];
        for neuron_type in types {
            let neuron = IzhikevichNeuron::from_type(neuron_type);
            assert!(neuron.params().validate().is_none());
        }
    }
    /// Constant drive should make an RS neuron fire at a moderate, sustained rate.
    #[test]
    fn test_regular_spiking_behavior() {
        let mut neuron = IzhikevichNeuron::regular_spiking();
        let mut spike_count = 0;
        // Inject constant current and count spikes over 1000 ms.
        for _ in 0..1000 {
            neuron.receive_input(10.0);
            if neuron.update(1.0) {
                spike_count += 1;
            }
        }
        assert!(spike_count > 10, "Regular spiking neuron should fire regularly");
        assert!(spike_count < 200, "Should not fire too fast");
    }
    /// Given identical drive, an FS neuron must out-fire an RS neuron.
    #[test]
    fn test_fast_spiking_behavior() {
        let mut fs = IzhikevichNeuron::fast_spiking();
        let mut rs = IzhikevichNeuron::regular_spiking();
        let mut fs_spikes = 0;
        let mut rs_spikes = 0;
        // Same input to both
        for _ in 0..1000 {
            fs.receive_input(14.0);
            rs.receive_input(14.0);
            if fs.update(1.0) { fs_spikes += 1; }
            if rs.update(1.0) { rs_spikes += 1; }
        }
        assert!(fs_spikes > rs_spikes, "Fast spiking should fire more than regular");
    }
    /// The recovery variable must jump (by `d`) after a spike.
    #[test]
    fn test_recovery_dynamics() {
        let mut neuron = IzhikevichNeuron::regular_spiking();
        let initial_u = neuron.recovery();
        neuron.v = 35.0; // Above threshold -> next update spikes
        neuron.update(1.0);
        assert!(neuron.recovery() > initial_u, "Recovery should increase after spike");
    }
    /// A weak input must not trigger a spike on the first timestep.
    #[test]
    fn test_subthreshold_dynamics() {
        let mut neuron = IzhikevichNeuron::regular_spiking();
        neuron.receive_input(2.0);
        assert!(!neuron.update(1.0));
        // The potential must remain subthreshold. Note it is NOT guaranteed
        // to rise above `c`: starting at v = c, the model drifts toward its
        // resting fixed point (~ -70 mV for RS parameters), which lies BELOW
        // c = -65, so asserting `v > c` here would be flaky.
        assert!(neuron.membrane_potential() < neuron.params.threshold);
    }
}

View File

@@ -0,0 +1,316 @@
//! Leaky Integrate-and-Fire (LIF) neuron model.
//!
//! The LIF model is the workhorse of neuromorphic computing:
//! - Simple dynamics: membrane voltage leaks toward rest
//! - Spikes when threshold crossed
//! - Resets and enters refractory period
//!
//! ## ASIC Benefits
//!
//! - Single multiply-accumulate per timestep
//! - No division (pre-computed decay factor)
//! - 2-3 comparisons per update
//! - ~100 gates in digital implementation
use super::{NeuronParams, NeuronState, SpikingNeuron, EnergyModel};
use serde::{Deserialize, Serialize};
/// Parameters for LIF neuron.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct LIFParams {
    /// Membrane time constant (ms) - controls how fast the potential leaks
    /// back toward `resting`
    pub tau_m: f32,
    /// Spike threshold (mV) - a spike fires when the potential reaches this
    pub threshold: f32,
    /// Reset potential after spike (mV) - must be below `threshold`
    pub reset: f32,
    /// Resting membrane potential (mV) - the leak target with zero input
    pub resting: f32,
    /// Refractory period (ms) - input is discarded for this long after a spike
    pub refractory: f32,
    /// Membrane resistance (MOhm) - scales input current into voltage drive
    pub resistance: f32,
}
impl Default for LIFParams {
fn default() -> Self {
Self {
tau_m: 20.0,
threshold: -50.0,
reset: -70.0,
resting: -65.0,
refractory: 2.0,
resistance: 10.0, // 10 MOhm typical for cortical neurons
}
}
}
impl NeuronParams for LIFParams {
    fn threshold(&self) -> f32 {
        self.threshold
    }
    fn reset_potential(&self) -> f32 {
        self.reset
    }
    fn resting_potential(&self) -> f32 {
        self.resting
    }
    fn refractory_period(&self) -> f32 {
        self.refractory
    }
    /// Check each parameter constraint in turn, reporting the first violation.
    fn validate(&self) -> Option<String> {
        let problem = if self.tau_m <= 0.0 {
            Some("tau_m must be positive")
        } else if self.threshold <= self.reset {
            Some("threshold must be greater than reset")
        } else if self.refractory < 0.0 {
            Some("refractory period cannot be negative")
        } else if self.resistance <= 0.0 {
            Some("resistance must be positive")
        } else {
            None
        };
        problem.map(String::from)
    }
}
/// Leaky Integrate-and-Fire neuron.
///
/// Implements the differential equation:
/// ```text
/// τ_m * dV/dt = -(V - V_rest) + R * I
/// ```
///
/// With spike condition: V ≥ V_threshold
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LIFNeuron {
    /// Neuron parameters (fixed after construction)
    params: LIFParams,
    /// Current membrane potential (mV)
    membrane_potential: f32,
    /// Time remaining in refractory period (ms); 0 when not refractory
    refractory_remaining: f32,
    /// Input current accumulated since the last `update()` call
    input_current: f32,
    /// Time since last spike (ms); `None` if the neuron has never spiked
    time_since_spike: Option<f32>,
    /// Decay factor pre-computed in `new()` for dt = 1ms; informational only —
    /// `update()` recomputes the decay from its actual `dt`
    decay_factor: f32,
}
impl LIFNeuron {
    /// Create LIF neuron with default parameters.
    pub fn with_defaults() -> Self {
        Self::new(LIFParams::default())
    }
    /// Pre-compute decay factor for given timestep.
    ///
    /// This avoids division in the hot path:
    /// decay = exp(-dt / tau_m) ≈ 1 - dt/tau_m for small dt
    /// (error < 1% for dt < 2ms with tau_m = 20ms).
    ///
    /// The linear approximation goes NEGATIVE once dt >= tau_m, which would
    /// flip the sign of (V - V_rest) and make the potential oscillate around
    /// rest instead of decaying. Clamp at 0 (instant full decay) so oversized
    /// timesteps degrade gracefully; normal timesteps are unaffected.
    fn compute_decay(&self, dt: f32) -> f32 {
        (1.0 - dt / self.params.tau_m).max(0.0)
    }
    /// Get the decay factor pre-computed in `new()` for the default dt = 1ms.
    ///
    /// NOTE: this cached value is informational; `update()` recomputes the
    /// decay from the `dt` it is actually given.
    pub fn decay_factor(&self) -> f32 {
        self.decay_factor
    }
}
impl SpikingNeuron for LIFNeuron {
    type Params = LIFParams;
    /// Construct at rest, caching the dt = 1ms decay factor.
    fn new(params: LIFParams) -> Self {
        Self {
            membrane_potential: params.resting,
            refractory_remaining: 0.0,
            input_current: 0.0,
            time_since_spike: None,
            decay_factor: 1.0 - 1.0 / params.tau_m, // For dt=1ms default
            params,
        }
    }
    /// Snapshot of the externally visible state.
    fn state(&self) -> NeuronState {
        NeuronState {
            membrane_potential: self.membrane_potential,
            time_since_spike: self.time_since_spike,
            is_refractory: self.refractory_remaining > 0.0,
            input_current: self.input_current,
        }
    }
    fn params(&self) -> &Self::Params {
        &self.params
    }
    /// Sparse accumulation: only invoked when an input event arrives.
    fn receive_input(&mut self, current: f32) {
        self.input_current += current;
    }
    /// One Euler step of the leaky integrator; returns `true` on spike.
    fn update(&mut self, dt: f32) -> bool {
        // Keep the time-since-spike clock running.
        if let Some(elapsed) = self.time_since_spike.as_mut() {
            *elapsed += dt;
        }
        // While refractory: count down, discard input, never spike.
        if self.refractory_remaining > 0.0 {
            self.refractory_remaining -= dt;
            self.input_current = 0.0;
            return false;
        }
        // V' = decay * (V - V_rest) + V_rest + R * I * dt / tau_m
        // (equivalent to V' = decay*V + (1-decay)*V_rest + drive).
        let decay = self.compute_decay(dt);
        let offset = self.membrane_potential - self.params.resting;
        let drive = self.params.resistance * self.input_current * dt / self.params.tau_m;
        self.membrane_potential = decay * offset + self.params.resting + drive;
        // Input is consumed exactly once per timestep.
        self.input_current = 0.0;
        if self.membrane_potential >= self.params.threshold {
            // Threshold crossed: reset and start the refractory window.
            self.membrane_potential = self.params.reset;
            self.refractory_remaining = self.params.refractory;
            self.time_since_spike = Some(0.0);
            true
        } else {
            false
        }
    }
    /// Return to the freshly constructed state.
    fn reset(&mut self) {
        self.membrane_potential = self.params.resting;
        self.refractory_remaining = 0.0;
        self.input_current = 0.0;
        self.time_since_spike = None;
    }
    fn is_refractory(&self) -> bool {
        self.refractory_remaining > 0.0
    }
    fn membrane_potential(&self) -> f32 {
        self.membrane_potential
    }
    fn time_since_spike(&self) -> Option<f32> {
        self.time_since_spike
    }
}
impl EnergyModel for LIFNeuron {
    /// Per-timestep cost estimate: 1 multiply, 3 adds, 2 comparisons
    /// (~0.5 pJ per operation at 28nm).
    fn update_energy(&self) -> f32 {
        const UPDATE_COST_PJ: f32 = 3.0;
        UPDATE_COST_PJ
    }
    /// Cost estimate for spike packet generation and routing.
    fn spike_energy(&self) -> f32 {
        const SPIKE_COST_PJ: f32 = 10.0;
        SPIKE_COST_PJ
    }
    /// Estimated footprint: ~100 gates at 28nm ≈ 0.1 μm² per gate.
    fn silicon_area(&self) -> f32 {
        const AREA_UM2: f32 = 10.0;
        AREA_UM2
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// A freshly constructed neuron sits at rest and is not refractory.
    #[test]
    fn test_lif_default_creation() {
        let neuron = LIFNeuron::with_defaults();
        assert_eq!(neuron.membrane_potential(), -65.0);
        assert!(!neuron.is_refractory());
    }
    /// Sustained strong input must eventually cross threshold and spike.
    #[test]
    fn test_lif_spike_generation() {
        let mut neuron = LIFNeuron::with_defaults();
        let mut spiked = false;
        for _ in 0..100 {
            neuron.receive_input(5.0); // Strong input
            if neuron.update(1.0) {
                spiked = true;
                break;
            }
        }
        assert!(spiked, "Neuron should have spiked with strong input");
        // Immediately after the spike: refractory, and clamped to reset.
        assert!(neuron.is_refractory());
        assert_eq!(neuron.membrane_potential(), neuron.params.reset);
    }
    /// During the refractory window even massive input cannot cause a spike.
    #[test]
    fn test_lif_refractory_period() {
        let params = LIFParams {
            refractory: 5.0,
            ..Default::default()
        };
        let mut neuron = LIFNeuron::new(params);
        // Force a spike by pushing the potential past threshold directly.
        neuron.membrane_potential = params.threshold + 1.0;
        neuron.update(1.0);
        assert!(neuron.is_refractory());
        // Huge input is discarded while refractory.
        neuron.receive_input(100.0);
        assert!(!neuron.update(1.0));
        // Five more 1ms steps exhaust the 5ms refractory window.
        for _ in 0..5 {
            neuron.update(1.0);
        }
        assert!(!neuron.is_refractory());
    }
    /// With no input, the potential leaks back toward rest.
    #[test]
    fn test_lif_leak_to_rest() {
        let mut neuron = LIFNeuron::with_defaults();
        neuron.membrane_potential = -55.0; // Above resting
        for _ in 0..100 {
            neuron.update(1.0);
        }
        assert!((neuron.membrane_potential() - (-65.0)).abs() < 1.0);
    }
    /// validate() rejects a non-positive time constant and accepts defaults.
    #[test]
    fn test_params_validation() {
        let invalid = LIFParams {
            tau_m: -1.0,
            ..Default::default()
        };
        assert!(invalid.validate().is_some());
        assert!(LIFParams::default().validate().is_none());
    }
}

View File

@@ -0,0 +1,41 @@
//! Spiking neuron models.
//!
//! This module provides biologically-inspired neuron models optimized for
//! event-driven computation. Neurons stay silent until a threshold is crossed,
//! eliminating wasted cycles on unchanged state.
//!
//! ## Available Models
//!
//! - **LIF (Leaky Integrate-and-Fire)**: Simple, efficient, ASIC-friendly
//! - **Izhikevich**: Rich dynamics, biologically plausible spiking patterns
//!
//! ## ASIC Considerations
//!
//! These models are designed for minimal silicon cost:
//! - Fixed-point compatible arithmetic
//! - No division operations in hot paths
//! - Predictable memory access patterns
//! - Branch-friendly state machines
mod lif;
mod izhikevich;
mod traits;
pub use lif::{LIFNeuron, LIFParams};
pub use izhikevich::{IzhikevichNeuron, IzhikevichParams, IzhikevichType};
pub use traits::{NeuronParams, SpikingNeuron, NeuronState};
/// Default membrane time constant (ms); kept in sync manually with `LIFParams::default()`
pub const DEFAULT_TAU_M: f32 = 20.0;
/// Default spike threshold (mV); kept in sync manually with `LIFParams::default()`
pub const DEFAULT_THRESHOLD: f32 = -50.0;
/// Default resting potential (mV); kept in sync manually with `LIFParams::default()`
pub const DEFAULT_RESTING: f32 = -65.0;
/// Default reset potential (mV); kept in sync manually with `LIFParams::default()`
pub const DEFAULT_RESET: f32 = -70.0;
/// Default refractory period (ms); kept in sync manually with `LIFParams::default()`
pub const DEFAULT_REFRACTORY: f32 = 2.0;

View File

@@ -0,0 +1,108 @@
//! Trait definitions for spiking neurons.
use serde::{Deserialize, Serialize};
/// State of a spiking neuron.
///
/// A plain-data snapshot of the externally visible dynamics, shared by all
/// neuron models via `SpikingNeuron::state()`.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct NeuronState {
    /// Membrane potential (mV)
    pub membrane_potential: f32,
    /// Time since last spike (ms), `None` if the neuron has never spiked
    pub time_since_spike: Option<f32>,
    /// Whether the neuron is currently in its refractory period
    pub is_refractory: bool,
    /// Input current accumulated for the current timestep
    pub input_current: f32,
}
impl Default for NeuronState {
fn default() -> Self {
Self {
membrane_potential: -65.0, // Resting potential
time_since_spike: None,
is_refractory: false,
input_current: 0.0,
}
}
}
/// Parameters that define a spiking neuron's behavior.
///
/// Each model bundles its own constants (time constants, reset values, etc.)
/// and exposes the handful of quantities common to all models through this trait.
pub trait NeuronParams: Clone + Send + Sync {
    /// Get the spike threshold voltage (mV)
    fn threshold(&self) -> f32;
    /// Get the reset voltage applied immediately after a spike (mV)
    fn reset_potential(&self) -> f32;
    /// Get the resting membrane potential the neuron relaxes toward (mV)
    fn resting_potential(&self) -> f32;
    /// Get the refractory period in milliseconds
    fn refractory_period(&self) -> f32;
    /// Validate parameters, returning an error message for the first violated
    /// constraint, or `None` if the parameters are usable
    fn validate(&self) -> Option<String>;
}
/// Core trait for spiking neuron models.
///
/// Implementing types should be efficient for ASIC deployment:
/// - Avoid floating-point division in `update()`
/// - Use predictable branching
/// - Minimize memory footprint
pub trait SpikingNeuron: Clone + Send + Sync {
    /// Associated parameter type
    type Params: NeuronParams;
    /// Create a new neuron with given parameters
    fn new(params: Self::Params) -> Self;
    /// Get a snapshot of the current neuron state
    fn state(&self) -> NeuronState;
    /// Get neuron parameters
    fn params(&self) -> &Self::Params;
    /// Add input current (from incoming spikes or external input)
    ///
    /// This is a sparse operation - only called when input arrives,
    /// never on silent timesteps.
    fn receive_input(&mut self, current: f32);
    /// Update neuron state for one timestep.
    ///
    /// Returns `true` if the neuron fires a spike.
    ///
    /// # Arguments
    /// * `dt` - Time step in milliseconds
    ///
    /// # ASIC Optimization
    /// This is the hot path. Implementations should:
    /// - Use only additions and multiplications
    /// - Avoid conditional branches where possible
    /// - Use fixed-point compatible operations
    fn update(&mut self, dt: f32) -> bool;
    /// Reset neuron to its initial (freshly constructed) state
    fn reset(&mut self);
    /// Check if neuron is in refractory period
    fn is_refractory(&self) -> bool;
    /// Get the current membrane potential (mV)
    fn membrane_potential(&self) -> f32;
    /// Get time since last spike in ms, or `None` if the neuron never spiked
    fn time_since_spike(&self) -> Option<f32>;
}
/// Energy estimation for ASIC cost analysis.
///
/// All figures are coarse per-neuron estimates used for comparing models,
/// not measurements of a specific process node.
pub trait EnergyModel {
    /// Estimate energy cost for a single update step (picojoules)
    fn update_energy(&self) -> f32;
    /// Estimate energy cost for spike emission and routing (picojoules)
    fn spike_energy(&self) -> f32;
    /// Estimate silicon area per neuron (square micrometers)
    fn silicon_area(&self) -> f32;
}