Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
This commit is contained in:
400
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/discrete_time_crystal.rs
vendored
Normal file
400
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/discrete_time_crystal.rs
vendored
Normal file
@@ -0,0 +1,400 @@
|
||||
// Discrete Time Crystal Implementation
|
||||
// Simulates discrete time translation symmetry breaking in neural-inspired systems
|
||||
|
||||
use ndarray::{Array1, Array2};
|
||||
use rand::Rng;
|
||||
use std::f64::consts::PI;
|
||||
|
||||
/// Configuration for discrete time crystal simulation.
///
/// All rates/frequencies are in Hz and times in seconds; the system is
/// integrated with an explicit Euler scheme of step `dt`.
#[derive(Clone, Debug)]
pub struct DTCConfig {
    /// Number of oscillators/neurons
    pub n_oscillators: usize,
    /// Drive frequency (Hz)
    pub drive_frequency: f64,
    /// Drive amplitude
    pub drive_amplitude: f64,
    /// Coupling strength between oscillators (global scale on the coupling matrix)
    pub coupling_strength: f64,
    /// Dissipation rate (linear damping coefficient)
    pub dissipation: f64,
    /// Nonlinearity parameter (gain inside the tanh activation)
    pub nonlinearity: f64,
    /// Noise amplitude (scale of additive uniform noise)
    pub noise_amplitude: f64,
    /// Time step for integration (seconds)
    pub dt: f64,
}
|
||||
|
||||
impl Default for DTCConfig {
    /// Defaults tuned for a theta-band (8 Hz) driven network of 100 units
    /// with a 1 ms integration step.
    fn default() -> Self {
        Self {
            n_oscillators: 100,
            drive_frequency: 8.0, // Theta frequency
            drive_amplitude: 2.0,
            coupling_strength: 0.5,
            dissipation: 0.1,
            nonlinearity: 1.0,
            noise_amplitude: 0.01,
            dt: 0.001, // 1 ms
        }
    }
}
|
||||
|
||||
/// Discrete Time Crystal simulator.
///
/// Holds the instantaneous state (positions and velocities) of a network of
/// damped, periodically driven, nonlinearly coupled oscillators.
pub struct DiscreteTimeCrystal {
    config: DTCConfig,
    /// State: position of each oscillator
    positions: Array1<f64>,
    /// Velocities
    velocities: Array1<f64>,
    /// Coupling matrix (asymmetric — breaks detailed balance)
    coupling_matrix: Array2<f64>,
    /// Current time (seconds since simulation start)
    time: f64,
    /// Random number generator (used for the per-step noise term)
    rng: rand::rngs::ThreadRng,
}
|
||||
|
||||
impl DiscreteTimeCrystal {
    /// Create new DTC simulator with random initial conditions:
    /// positions uniform in [-1, 1), velocities uniform in [-0.1, 0.1).
    pub fn new(config: DTCConfig) -> Self {
        let mut rng = rand::thread_rng();
        let n = config.n_oscillators;

        // Initialize positions and velocities randomly
        let positions = Array1::from_vec((0..n).map(|_| rng.gen_range(-1.0..1.0)).collect());
        let velocities = Array1::from_vec((0..n).map(|_| rng.gen_range(-0.1..0.1)).collect());

        // Create asymmetric coupling matrix
        let coupling_matrix = Self::generate_asymmetric_coupling(n, &mut rng);

        Self {
            config,
            positions,
            velocities,
            coupling_matrix,
            time: 0.0,
            rng,
        }
    }

    /// Generate asymmetric coupling matrix.
    /// Asymmetry is crucial for breaking detailed balance and enabling limit cycles.
    /// Off-diagonal entries are nonzero with probability 0.2 and drawn from [-1, 1).
    fn generate_asymmetric_coupling(n: usize, rng: &mut rand::rngs::ThreadRng) -> Array2<f64> {
        let mut matrix = Array2::zeros((n, n));

        for i in 0..n {
            for j in 0..n {
                if i != j {
                    // Sparse coupling: only ~20% of pairs are connected
                    if rng.gen::<f64>() < 0.2 {
                        matrix[[i, j]] = rng.gen_range(-1.0..1.0);
                    }
                }
            }
        }

        matrix
    }

    /// Periodic driving force (e.g. theta oscillations): A*cos(2π f t).
    fn drive_force(&self, t: f64) -> f64 {
        let omega = 2.0 * PI * self.config.drive_frequency;
        self.config.drive_amplitude * (omega * t).cos()
    }

    /// Nonlinear activation function (tanh with configurable gain).
    fn activation(&self, x: f64) -> f64 {
        (self.config.nonlinearity * x).tanh()
    }

    /// Compute forces on all oscillators at the current state/time.
    /// Returns (dx/dt, dv/dt) for every oscillator.
    fn compute_forces(&mut self) -> (Array1<f64>, Array1<f64>) {
        let n = self.config.n_oscillators;
        let mut forces_pos = Array1::zeros(n);
        let mut forces_vel = Array1::zeros(n);

        // Drive is identical for all oscillators at a given time
        let drive = self.drive_force(self.time);

        for i in 0..n {
            // Coupling forces (asymmetric → breaks detailed balance)
            let mut coupling_force = 0.0;
            for j in 0..n {
                if i != j {
                    let coupling = self.coupling_matrix[[i, j]];
                    coupling_force += coupling * self.activation(self.positions[j]);
                }
            }

            // Position force: dx/dt = v
            forces_pos[i] = self.velocities[i];

            // Velocity force: -x - γv + J*coupling + A*drive + noise
            // Noise is uniform in [-noise_amplitude, noise_amplitude), zero-mean.
            let noise = self.rng.gen_range(-1.0..1.0) * self.config.noise_amplitude;
            forces_vel[i] = -self.positions[i] - self.config.dissipation * self.velocities[i]
                + self.config.coupling_strength * coupling_force
                + drive
                + noise;
        }

        (forces_pos, forces_vel)
    }

    /// Evolve system by one time step (explicit Euler integration).
    /// Both forces are evaluated at the pre-update state.
    pub fn step(&mut self) {
        let (forces_pos, forces_vel) = self.compute_forces();

        // Update positions and velocities
        self.positions += &(forces_pos * self.config.dt);
        self.velocities += &(forces_vel * self.config.dt);

        self.time += self.config.dt;
    }

    /// Run simulation for the specified duration (seconds).
    /// Returns one position snapshot per integration step (post-step).
    pub fn run(&mut self, duration: f64) -> Vec<Array1<f64>> {
        let n_steps = (duration / self.config.dt) as usize;
        let mut trajectory = Vec::with_capacity(n_steps);

        for _ in 0..n_steps {
            self.step();
            trajectory.push(self.positions.clone());
        }

        trajectory
    }

    /// Compute order parameter M_k for subharmonic order k, one value per
    /// trajectory sample. Values lie in [0, 1] by construction (magnitude of
    /// an average of unit phasors).
    pub fn compute_order_parameter(&self, trajectory: &[Array1<f64>], k: usize) -> Vec<f64> {
        let omega_0 = 2.0 * PI * self.config.drive_frequency;
        let n = self.config.n_oscillators;

        trajectory
            .iter()
            .enumerate()
            .map(|(step, positions)| {
                let _t = step as f64 * self.config.dt;

                // Compute phases relative to drive
                let mut sum_real = 0.0;
                let mut sum_imag = 0.0;

                for i in 0..n {
                    // Phase of oscillator i.
                    // NOTE(review): atan2(x, 1.0) == atan(x) — a simplified
                    // (position-only) phase proxy, not a true Hilbert phase.
                    let phase = positions[i].atan2(1.0); // Simplified phase extraction
                    let arg = k as f64 * omega_0 * phase;
                    sum_real += arg.cos();
                    sum_imag += arg.sin();
                }

                // Order parameter: |<e^(ik*omega*phi)>|
                let m_k = ((sum_real / n as f64).powi(2) + (sum_imag / n as f64).powi(2)).sqrt();
                m_k
            })
            .collect()
    }

    /// Compute power spectral density to detect subharmonics.
    /// Returns (frequency bins, power) covering the positive half-spectrum.
    pub fn compute_psd(&self, signal: &[f64], sample_rate: f64) -> (Vec<f64>, Vec<f64>) {
        // Simple FFT-based PSD (no windowing/averaging — a raw periodogram)
        use rustfft::{num_complex::Complex, FftPlanner};

        let n = signal.len();
        let mut planner = FftPlanner::new();
        let fft = planner.plan_fft_forward(n);

        // Convert to complex
        let mut buffer: Vec<Complex<f64>> =
            signal.iter().map(|&x| Complex { re: x, im: 0.0 }).collect();

        // Apply FFT (in place)
        fft.process(&mut buffer);

        // Compute power, keeping only the first n/2 (positive-frequency) bins
        let power: Vec<f64> = buffer
            .iter()
            .take(n / 2)
            .map(|c| (c.re * c.re + c.im * c.im) / n as f64)
            .collect();

        // Frequency bins: bin i corresponds to i * sample_rate / n Hz
        let freqs: Vec<f64> = (0..n / 2)
            .map(|i| i as f64 * sample_rate / n as f64)
            .collect();

        (freqs, power)
    }

    /// Detect period-doubling by comparing spectral power at the drive
    /// frequency f against the subharmonic f/2.
    /// Returns (power ratio p(f/2)/p(f), ratio > 1).
    pub fn detect_period_doubling(&self, trajectory: &[Array1<f64>]) -> (f64, bool) {
        // Average activity across all oscillators
        let signal: Vec<f64> = trajectory
            .iter()
            .map(|positions| positions.mean().unwrap())
            .collect();

        let sample_rate = 1.0 / self.config.dt;
        let (freqs, power) = self.compute_psd(&signal, sample_rate);

        // Find peaks at drive frequency and its half
        let drive_freq = self.config.drive_frequency;
        let half_freq = drive_freq / 2.0;

        // Find power at these frequencies (within tolerance)
        let tol = 0.5; // Hz tolerance

        let p_drive: f64 = freqs
            .iter()
            .zip(&power)
            .filter(|(f, _)| (*f - drive_freq).abs() < tol)
            .map(|(_, p)| p)
            .fold(0.0_f64, |acc, &p| acc.max(p));

        let p_half: f64 = freqs
            .iter()
            .zip(&power)
            .filter(|(f, _)| (*f - half_freq).abs() < tol)
            .map(|(_, p)| p)
            .fold(0.0_f64, |acc, &p| acc.max(p));

        // Period-doubling if power at f/2 exceeds power at f.
        // The 1e-10 floor prevents division by zero when no drive peak exists.
        let ratio = p_half / p_drive.max(1e-10);
        let is_period_doubled = ratio > 1.0;

        (ratio, is_period_doubled)
    }

    /// Get current state: (positions, velocities, time).
    pub fn get_state(&self) -> (&Array1<f64>, &Array1<f64>, f64) {
        (&self.positions, &self.velocities, self.time)
    }
}
|
||||
|
||||
/// Analysis utilities for DTC
pub mod analysis {

    /// Compute the normalized temporal autocorrelation C(τ) for lags
    /// `0..max_lag`.
    ///
    /// C(0) == 1 for any non-constant signal. Degenerate inputs — an empty
    /// signal, a constant (zero-variance) signal, or a lag not smaller than
    /// the signal length — yield 0.0 entries instead of NaN or a panic
    /// (the original underflowed `n - lag` and divided by zero variance).
    pub fn autocorrelation(signal: &[f64], max_lag: usize) -> Vec<f64> {
        let n = signal.len();
        if n == 0 {
            return vec![0.0; max_lag];
        }
        let mean = signal.iter().sum::<f64>() / n as f64;
        let variance = signal.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / n as f64;
        if variance == 0.0 {
            // Constant signal: correlation is undefined; report zeros.
            return vec![0.0; max_lag];
        }

        (0..max_lag)
            .map(|lag| {
                if lag >= n {
                    // No overlapping samples at this lag.
                    return 0.0;
                }
                let mut sum = 0.0;
                for i in 0..(n - lag) {
                    sum += (signal[i] - mean) * (signal[i + lag] - mean);
                }
                sum / ((n - lag) as f64 * variance)
            })
            .collect()
    }

    /// Fit autocorrelation to detect power-law vs exponential decay.
    /// Returns (is_power_law, exponent).
    ///
    /// Compares R² of a log-log fit (power law: log C ~ -α log τ) against a
    /// log-linear fit (exponential: log C ~ -τ/τ_c); the better fit wins.
    /// Fewer than three samples cannot support either fit and return
    /// `(false, 0.0)`.
    pub fn classify_decay(autocorr: &[f64]) -> (bool, f64) {
        if autocorr.len() < 3 {
            return (false, 0.0);
        }

        let lags: Vec<f64> = (1..autocorr.len()).map(|i| i as f64).collect();
        // Clamp to 1e-10 so ln() never sees zero/negative correlations.
        let log_autocorr: Vec<f64> = autocorr
            .iter()
            .skip(1)
            .map(|&c| c.max(1e-10).ln())
            .collect();

        // Power-law fit: log(C) vs log(τ)
        let log_lags: Vec<f64> = lags.iter().map(|&x| x.ln()).collect();
        let power_law_fit = linear_regression(&log_lags, &log_autocorr);

        // Exponential fit: log(C) vs τ
        let exp_fit = linear_regression(&lags, &log_autocorr);

        // Better R² decides the decay class
        let is_power_law = power_law_fit.1 > exp_fit.1;
        let exponent = if is_power_law {
            -power_law_fit.0
        } else {
            -exp_fit.0
        };

        (is_power_law, exponent)
    }

    /// Simple least-squares linear regression: returns (slope, R²).
    ///
    /// Degenerate inputs (all x identical, or zero y-variance) return
    /// (0.0, 0.0) instead of dividing by zero.
    fn linear_regression(x: &[f64], y: &[f64]) -> (f64, f64) {
        let n = x.len() as f64;
        let sum_x: f64 = x.iter().sum();
        let sum_y: f64 = y.iter().sum();
        let sum_xx: f64 = x.iter().map(|&xi| xi * xi).sum();
        let sum_xy: f64 = x.iter().zip(y).map(|(&xi, &yi)| xi * yi).sum();

        let denom = n * sum_xx - sum_x * sum_x;
        if denom == 0.0 {
            // All x values identical: slope is undefined.
            return (0.0, 0.0);
        }
        let slope = (n * sum_xy - sum_x * sum_y) / denom;

        // R² against the fitted line y = slope*x + intercept
        let mean_y = sum_y / n;
        let ss_tot: f64 = y.iter().map(|&yi| (yi - mean_y).powi(2)).sum();
        let ss_res: f64 = x
            .iter()
            .zip(y)
            .map(|(&xi, &yi)| {
                let pred = slope * xi + (sum_y - slope * sum_x) / n;
                (yi - pred).powi(2)
            })
            .sum();
        let r_squared = if ss_tot == 0.0 {
            0.0
        } else {
            1.0 - ss_res / ss_tot
        };

        (slope, r_squared)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test: constructor allocates one position per configured oscillator.
    #[test]
    fn test_dtc_creation() {
        let config = DTCConfig::default();
        let dtc = DiscreteTimeCrystal::new(config);
        assert_eq!(dtc.positions.len(), 100);
    }

    /// Demonstration run for the period-doubling detector.
    /// Stochastic (random couplings + noise), so no assertion is made.
    #[test]
    fn test_period_doubling() {
        let mut config = DTCConfig::default();
        config.drive_amplitude = 3.0; // Strong drive to induce period-doubling
        config.coupling_strength = 0.8;
        config.n_oscillators = 50;

        let mut dtc = DiscreteTimeCrystal::new(config);

        // Run for a few oscillation periods
        let duration = 2.0; // 2 seconds = 16 theta cycles
        let trajectory = dtc.run(duration);

        // Detect period-doubling
        let (ratio, is_doubled) = dtc.detect_period_doubling(&trajectory);

        println!("Period-doubling ratio: {}", ratio);
        println!("Is period-doubled: {}", is_doubled);

        // Note: This is stochastic, so we don't assert, just demonstrate
    }

    /// The order parameter is the magnitude of a mean of unit phasors,
    /// so every sample must lie in [0, 1].
    #[test]
    fn test_order_parameter() {
        let config = DTCConfig::default();
        let mut dtc = DiscreteTimeCrystal::new(config);

        let duration = 1.0;
        let trajectory = dtc.run(duration);

        let m_2 = dtc.compute_order_parameter(&trajectory, 2);

        // Order parameter should be between 0 and 1
        for &m in &m_2 {
            assert!(m >= 0.0 && m <= 1.0);
        }
    }
}
|
||||
479
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/floquet_cognition.rs
vendored
Normal file
479
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/floquet_cognition.rs
vendored
Normal file
@@ -0,0 +1,479 @@
|
||||
// Floquet Cognition: Periodically Driven Cognitive Systems
|
||||
// Implements Floquet theory for neural networks to study time crystal-like dynamics
|
||||
|
||||
use ndarray::{Array1, Array2};
|
||||
use std::f64::consts::PI;
|
||||
|
||||
/// Floquet system configuration.
///
/// Times are in seconds; the rate dynamics are integrated with explicit
/// Euler steps of size `dt`.
#[derive(Clone, Debug)]
pub struct FloquetConfig {
    /// Number of neurons
    pub n_neurons: usize,
    /// Neural time constant τ (seconds)
    pub tau: f64,
    /// Drive period T (seconds)
    pub drive_period: f64,
    /// Drive amplitude
    pub drive_amplitude: f64,
    /// Noise level (scale of the per-step additive noise)
    pub noise_level: f64,
    /// Time step (seconds)
    pub dt: f64,
}
|
||||
|
||||
impl Default for FloquetConfig {
    /// Defaults: 100 neurons, 10 ms membrane time constant,
    /// 125 ms (8 Hz theta) drive period, 1 ms integration step.
    fn default() -> Self {
        Self {
            n_neurons: 100,
            tau: 0.01,           // 10ms
            drive_period: 0.125, // 125ms = 8 Hz theta
            drive_amplitude: 1.0,
            noise_level: 0.01,
            dt: 0.001,
        }
    }
}
|
||||
|
||||
/// Floquet neural system: a periodically driven rate network.
pub struct FloquetCognitiveSystem {
    config: FloquetConfig,
    /// Firing rates (one per neuron)
    firing_rates: Array1<f64>,
    /// Synaptic weight matrix (asymmetric! — breaks detailed balance)
    weights: Array2<f64>,
    /// Current time (seconds)
    time: f64,
    /// Phase of driving (0 to 2π), derived from `time` each step
    drive_phase: f64,
}
|
||||
|
||||
impl FloquetCognitiveSystem {
|
||||
/// Create new Floquet cognitive system
|
||||
pub fn new(config: FloquetConfig, weights: Array2<f64>) -> Self {
|
||||
let n = config.n_neurons;
|
||||
assert_eq!(weights.shape(), &[n, n], "Weight matrix must be n x n");
|
||||
|
||||
// Initialize firing rates randomly
|
||||
let firing_rates = Array1::from_vec((0..n).map(|_| rand::random::<f64>() * 0.1).collect());
|
||||
|
||||
Self {
|
||||
config,
|
||||
firing_rates,
|
||||
weights,
|
||||
time: 0.0,
|
||||
drive_phase: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate asymmetric weight matrix (breaks detailed balance)
|
||||
pub fn generate_asymmetric_weights(n: usize, sparsity: f64, strength: f64) -> Array2<f64> {
|
||||
let mut weights = Array2::zeros((n, n));
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
use rand::Rng;
|
||||
for i in 0..n {
|
||||
for j in 0..n {
|
||||
if i != j && rng.gen::<f64>() < sparsity {
|
||||
weights[[i, j]] = rng.gen_range(-strength..strength);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
weights
|
||||
}
|
||||
|
||||
/// Periodic external input (task structure, theta oscillations, etc.)
|
||||
fn external_input(&self, neuron_idx: usize) -> f64 {
|
||||
// Different neurons receive inputs at different phases
|
||||
let phase_offset = 2.0 * PI * neuron_idx as f64 / self.config.n_neurons as f64;
|
||||
self.config.drive_amplitude * (self.drive_phase + phase_offset).cos()
|
||||
}
|
||||
|
||||
/// Activation function (sigmoid-like)
|
||||
fn activation(x: f64) -> f64 {
|
||||
x.tanh()
|
||||
}
|
||||
|
||||
/// Compute derivatives dr/dt
|
||||
fn compute_derivatives(&self) -> Array1<f64> {
|
||||
let n = self.config.n_neurons;
|
||||
let mut derivatives = Array1::zeros(n);
|
||||
|
||||
for i in 0..n {
|
||||
// Recurrent input
|
||||
let mut recurrent_input = 0.0;
|
||||
for j in 0..n {
|
||||
recurrent_input += self.weights[[i, j]] * self.firing_rates[j];
|
||||
}
|
||||
|
||||
// External input
|
||||
let external = self.external_input(i);
|
||||
|
||||
// Noise
|
||||
let noise = rand::random::<f64>() * self.config.noise_level;
|
||||
|
||||
// Neural dynamics: τ dr/dt = -r + f(Wr + I)
|
||||
derivatives[i] =
|
||||
(-self.firing_rates[i] + Self::activation(recurrent_input + external) + noise)
|
||||
/ self.config.tau;
|
||||
}
|
||||
|
||||
derivatives
|
||||
}
|
||||
|
||||
/// Evolve system by one time step
|
||||
pub fn step(&mut self) {
|
||||
let derivatives = self.compute_derivatives();
|
||||
self.firing_rates += &(derivatives * self.config.dt);
|
||||
|
||||
self.time += self.config.dt;
|
||||
self.drive_phase = (2.0 * PI * self.time / self.config.drive_period) % (2.0 * PI);
|
||||
}
|
||||
|
||||
/// Run simulation and record trajectory
|
||||
pub fn run(&mut self, n_periods: usize) -> FloquetTrajectory {
|
||||
let period = self.config.drive_period;
|
||||
let steps_per_period = (period / self.config.dt) as usize;
|
||||
let total_steps = steps_per_period * n_periods;
|
||||
|
||||
let mut trajectory =
|
||||
FloquetTrajectory::new(self.config.n_neurons, total_steps, self.config.dt, period);
|
||||
|
||||
for step in 0..total_steps {
|
||||
self.step();
|
||||
trajectory.record(step, &self.firing_rates, self.drive_phase);
|
||||
}
|
||||
|
||||
trajectory
|
||||
}
|
||||
|
||||
/// Compute monodromy matrix (Floquet multipliers)
|
||||
/// This is the key quantity for detecting time crystal phase
|
||||
pub fn compute_monodromy_matrix(&mut self) -> (Array2<f64>, Vec<f64>) {
|
||||
let n = self.config.n_neurons;
|
||||
let period = self.config.drive_period;
|
||||
let initial_time = self.time;
|
||||
|
||||
// Save initial state
|
||||
let initial_rates = self.firing_rates.clone();
|
||||
|
||||
// Monodromy matrix
|
||||
let mut monodromy = Array2::zeros((n, n));
|
||||
|
||||
// For each basis direction
|
||||
for i in 0..n {
|
||||
// Perturb in direction i
|
||||
let mut perturbed_rates = initial_rates.clone();
|
||||
perturbed_rates[i] += 1e-6;
|
||||
|
||||
self.firing_rates = perturbed_rates;
|
||||
self.time = initial_time;
|
||||
self.drive_phase = (2.0 * PI * self.time / period) % (2.0 * PI);
|
||||
|
||||
// Evolve for one period
|
||||
let steps_per_period = (period / self.config.dt) as usize;
|
||||
for _ in 0..steps_per_period {
|
||||
self.step();
|
||||
}
|
||||
|
||||
// Column i of monodromy matrix
|
||||
for j in 0..n {
|
||||
monodromy[[j, i]] = (self.firing_rates[j] - initial_rates[j]) / 1e-6;
|
||||
}
|
||||
}
|
||||
|
||||
// Restore initial state
|
||||
self.firing_rates = initial_rates;
|
||||
self.time = initial_time;
|
||||
|
||||
// Compute eigenvalues (Floquet multipliers)
|
||||
let eigenvalues = compute_eigenvalues(&monodromy);
|
||||
|
||||
(monodromy, eigenvalues)
|
||||
}
|
||||
|
||||
/// Detect time crystal phase by checking for -1 eigenvalue
|
||||
pub fn detect_time_crystal_phase(&mut self) -> (bool, f64) {
|
||||
let (_, eigenvalues) = self.compute_monodromy_matrix();
|
||||
|
||||
// Look for eigenvalue near -1 (period-doubling)
|
||||
let min_dist_to_minus_one = eigenvalues
|
||||
.iter()
|
||||
.map(|&lambda| (lambda + 1.0).abs())
|
||||
.fold(f64::INFINITY, f64::min);
|
||||
|
||||
let is_time_crystal = min_dist_to_minus_one < 0.1; // Threshold
|
||||
|
||||
(is_time_crystal, min_dist_to_minus_one)
|
||||
}
|
||||
}
|
||||
|
||||
/// Trajectory recorder for Floquet analysis.
pub struct FloquetTrajectory {
    /// Firing rates over time: one `Array1` snapshot (length n_neurons) per step
    pub firing_rates: Vec<Array1<f64>>,
    /// Drive phase over time (radians, in [0, 2π))
    pub drive_phases: Vec<f64>,
    /// Time points (seconds)
    pub times: Vec<f64>,
    /// Number of neurons per snapshot
    pub n_neurons: usize,
    /// Integration step used during recording (seconds)
    pub dt: f64,
    /// Drive period of the recorded run (seconds)
    pub drive_period: f64,
}
|
||||
|
||||
impl FloquetTrajectory {
|
||||
fn new(n_neurons: usize, n_steps: usize, dt: f64, drive_period: f64) -> Self {
|
||||
Self {
|
||||
firing_rates: Vec::with_capacity(n_steps),
|
||||
drive_phases: Vec::with_capacity(n_steps),
|
||||
times: Vec::with_capacity(n_steps),
|
||||
n_neurons,
|
||||
dt,
|
||||
drive_period,
|
||||
}
|
||||
}
|
||||
|
||||
fn record(&mut self, step: usize, rates: &Array1<f64>, phase: f64) {
|
||||
self.firing_rates.push(rates.clone());
|
||||
self.drive_phases.push(phase);
|
||||
self.times.push(step as f64 * self.dt);
|
||||
}
|
||||
|
||||
/// Compute Poincaré section (stroboscopic map)
|
||||
/// Sample firing rates at same phase each period
|
||||
pub fn poincare_section(&self, phase_threshold: f64) -> Vec<Array1<f64>> {
|
||||
let mut section = Vec::new();
|
||||
|
||||
for (i, &phase) in self.drive_phases.iter().enumerate() {
|
||||
if i > 0 {
|
||||
let prev_phase = self.drive_phases[i - 1];
|
||||
// Detect crossing of threshold phase
|
||||
if prev_phase < phase_threshold && phase >= phase_threshold {
|
||||
section.push(self.firing_rates[i].clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
section
|
||||
}
|
||||
|
||||
/// Check if Poincaré section shows period-doubling
|
||||
/// (adjacent points alternate between two clusters)
|
||||
pub fn detect_period_doubling_poincare(&self) -> bool {
|
||||
let section = self.poincare_section(0.0);
|
||||
|
||||
if section.len() < 4 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Compute distances between consecutive points
|
||||
let mut distances = Vec::new();
|
||||
for i in 0..section.len() - 1 {
|
||||
let dist = (§ion[i] - §ion[i + 1]).mapv(|x| x * x).sum().sqrt();
|
||||
distances.push(dist);
|
||||
}
|
||||
|
||||
// In period-doubling, alternating distances: small, large, small, large...
|
||||
// Check for this pattern
|
||||
let mut alternates = 0;
|
||||
for i in 0..distances.len() - 1 {
|
||||
if (distances[i] < distances[i + 1]) != (i % 2 == 0) {
|
||||
alternates += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// If most transitions alternate, we have period-doubling
|
||||
alternates as f64 / distances.len() as f64 > 0.7
|
||||
}
|
||||
|
||||
/// Compute spectral analysis
|
||||
pub fn compute_power_spectrum(&self) -> (Vec<f64>, Vec<f64>) {
|
||||
// Average firing rate across all neurons
|
||||
let signal: Vec<f64> = self
|
||||
.firing_rates
|
||||
.iter()
|
||||
.map(|rates| rates.mean().unwrap())
|
||||
.collect();
|
||||
|
||||
// FFT
|
||||
use rustfft::{num_complex::Complex, FftPlanner};
|
||||
|
||||
let n = signal.len();
|
||||
let mut planner = FftPlanner::new();
|
||||
let fft = planner.plan_fft_forward(n);
|
||||
|
||||
let mut buffer: Vec<Complex<f64>> =
|
||||
signal.iter().map(|&x| Complex { re: x, im: 0.0 }).collect();
|
||||
|
||||
fft.process(&mut buffer);
|
||||
|
||||
let power: Vec<f64> = buffer
|
||||
.iter()
|
||||
.take(n / 2)
|
||||
.map(|c| (c.re * c.re + c.im * c.im) / n as f64)
|
||||
.collect();
|
||||
|
||||
let sample_rate = 1.0 / self.dt;
|
||||
let freqs: Vec<f64> = (0..n / 2)
|
||||
.map(|i| i as f64 * sample_rate / n as f64)
|
||||
.collect();
|
||||
|
||||
(freqs, power)
|
||||
}
|
||||
|
||||
/// Compute order parameter M_k
|
||||
pub fn compute_order_parameter(&self, k: usize) -> Vec<f64> {
|
||||
let omega_0 = 2.0 * PI / self.drive_period;
|
||||
|
||||
self.firing_rates
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(step, rates)| {
|
||||
let _t = step as f64 * self.dt;
|
||||
let n = self.n_neurons;
|
||||
|
||||
// Phases of each neuron
|
||||
let mut sum_real = 0.0;
|
||||
let mut sum_imag = 0.0;
|
||||
|
||||
for i in 0..n {
|
||||
// Simple phase extraction (more sophisticated: use Hilbert transform)
|
||||
let phase = rates[i] * PI; // Map firing rate to phase
|
||||
let arg = k as f64 * omega_0 * phase;
|
||||
sum_real += arg.cos();
|
||||
sum_imag += arg.sin();
|
||||
}
|
||||
|
||||
((sum_real / n as f64).powi(2) + (sum_imag / n as f64).powi(2)).sqrt()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute eigenvalues of matrix (simplified - use proper linear algebra library)
|
||||
fn compute_eigenvalues(matrix: &Array2<f64>) -> Vec<f64> {
|
||||
// This is a placeholder - in practice, use nalgebra or ndarray-linalg
|
||||
// For now, return diagonal elements as rough approximation
|
||||
let n = matrix.shape()[0];
|
||||
(0..n).map(|i| matrix[[i, i]]).collect()
|
||||
}
|
||||
|
||||
/// Phase diagram analyzer: scans (drive amplitude × coupling strength)
/// space and records where period-doubling (DTC behavior) is detected.
pub struct PhaseDiagram {
    /// Range of drive amplitudes to test
    pub amplitude_range: Vec<f64>,
    /// Range of coupling strengths
    pub coupling_range: Vec<f64>,
    /// Results: results[i][j] == true iff (amplitude_range[i],
    /// coupling_range[j]) produced time-crystal (period-doubled) dynamics
    pub results: Vec<Vec<bool>>,
}
|
||||
|
||||
impl PhaseDiagram {
|
||||
pub fn new(
|
||||
amp_min: f64,
|
||||
amp_max: f64,
|
||||
n_amp: usize,
|
||||
coupling_min: f64,
|
||||
coupling_max: f64,
|
||||
n_coupling: usize,
|
||||
) -> Self {
|
||||
let amplitude_range = (0..n_amp)
|
||||
.map(|i| amp_min + (amp_max - amp_min) * i as f64 / (n_amp - 1) as f64)
|
||||
.collect();
|
||||
|
||||
let coupling_range = (0..n_coupling)
|
||||
.map(|i| {
|
||||
coupling_min + (coupling_max - coupling_min) * i as f64 / (n_coupling - 1) as f64
|
||||
})
|
||||
.collect();
|
||||
|
||||
let results = vec![vec![false; n_coupling]; n_amp];
|
||||
|
||||
Self {
|
||||
amplitude_range,
|
||||
coupling_range,
|
||||
results,
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute phase diagram by scanning parameter space
|
||||
pub fn compute(&mut self, base_config: FloquetConfig, n_periods: usize) {
|
||||
for (i, &litude) in self.amplitude_range.iter().enumerate() {
|
||||
for (j, &coupling) in self.coupling_range.iter().enumerate() {
|
||||
let mut config = base_config.clone();
|
||||
config.drive_amplitude = amplitude;
|
||||
|
||||
let weights = FloquetCognitiveSystem::generate_asymmetric_weights(
|
||||
config.n_neurons,
|
||||
0.2,
|
||||
coupling,
|
||||
);
|
||||
|
||||
let mut system = FloquetCognitiveSystem::new(config, weights);
|
||||
let trajectory = system.run(n_periods);
|
||||
|
||||
// Detect time crystal from trajectory
|
||||
let is_dtc = trajectory.detect_period_doubling_poincare();
|
||||
self.results[i][j] = is_dtc;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Print ASCII phase diagram
|
||||
pub fn print(&self) {
|
||||
println!("\nPhase Diagram: DTC (X) vs Non-DTC (·)");
|
||||
println!("Coupling (horizontal) vs Amplitude (vertical)\n");
|
||||
|
||||
for (i, row) in self.results.iter().enumerate().rev() {
|
||||
print!("{:.2} | ", self.amplitude_range[i]);
|
||||
for &is_dtc in row {
|
||||
print!("{}", if is_dtc { "X" } else { "·" });
|
||||
}
|
||||
println!();
|
||||
}
|
||||
|
||||
print!(" ");
|
||||
for _ in &self.coupling_range {
|
||||
print!("-");
|
||||
}
|
||||
println!(
|
||||
"\n {:.2} ... {:.2}",
|
||||
self.coupling_range[0],
|
||||
self.coupling_range[self.coupling_range.len() - 1]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// With dt = 1 ms and a 125 ms drive period, 10 periods must record
    /// exactly 10 * 125 samples.
    #[test]
    fn test_floquet_system() {
        let config = FloquetConfig::default();
        let weights =
            FloquetCognitiveSystem::generate_asymmetric_weights(config.n_neurons, 0.2, 1.0);

        let mut system = FloquetCognitiveSystem::new(config, weights);
        let trajectory = system.run(10); // 10 periods

        assert_eq!(trajectory.firing_rates.len(), 10 * 125);
    }

    /// The Poincaré section at phase π is crossed at most once per period,
    /// so it can never contain more points than the full trajectory.
    /// (The original asserted `section.len() >= 0`, which is always true
    /// for a usize and tested nothing.)
    #[test]
    fn test_poincare_section() {
        let config = FloquetConfig::default();
        let weights =
            FloquetCognitiveSystem::generate_asymmetric_weights(config.n_neurons, 0.2, 1.0);

        let mut system = FloquetCognitiveSystem::new(config, weights);
        let trajectory = system.run(10);

        // Use PI as threshold to ensure crossings occur
        let section = trajectory.poincare_section(std::f64::consts::PI);
        // At most one crossing per recorded sample
        assert!(section.len() <= trajectory.firing_rates.len());
        // Every section point carries one rate per neuron
        for point in &section {
            assert_eq!(point.len(), trajectory.n_neurons);
        }
    }
}
|
||||
19
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/lib.rs
vendored
Normal file
19
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/lib.rs
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// Time Crystal Cognition Library
|
||||
// Cognitive Time Crystals: Discrete Time Translation Symmetry Breaking in Working Memory
|
||||
|
||||
pub mod discrete_time_crystal;
|
||||
pub mod floquet_cognition;
|
||||
pub mod simd_optimizations;
|
||||
pub mod temporal_memory;
|
||||
|
||||
// Re-export main types
|
||||
pub use discrete_time_crystal::{DTCConfig, DiscreteTimeCrystal};
|
||||
pub use floquet_cognition::{
|
||||
FloquetCognitiveSystem, FloquetConfig, FloquetTrajectory, PhaseDiagram,
|
||||
};
|
||||
pub use simd_optimizations::{
|
||||
HierarchicalTimeCrystal, SimdDTC, SimdFloquet, TopologicalTimeCrystal,
|
||||
};
|
||||
pub use temporal_memory::{
|
||||
MemoryItem, MemoryStats, TemporalMemory, TemporalMemoryConfig, WorkingMemoryTask,
|
||||
};
|
||||
435
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/simd_optimizations.rs
vendored
Normal file
435
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/simd_optimizations.rs
vendored
Normal file
@@ -0,0 +1,435 @@
|
||||
// SIMD Optimizations for Time Crystal Cognition
|
||||
// Accelerates oscillator dynamics and neural computations using vectorized operations
|
||||
|
||||
use ndarray::{Array1, Array2, Zip};
|
||||
use std::f64::consts::PI;
|
||||
|
||||
/// SIMD-optimized discrete time crystal update.
/// Uses ndarray's vectorized whole-array operations (matrix products and
/// `Zip` element-wise loops) instead of scalar per-oscillator loops.
///
/// NOTE(review): unlike `DiscreteTimeCrystal`, this variant has no noise
/// term and no nonlinearity gain parameter — confirm whether parity with
/// the scalar implementation is required.
pub struct SimdDTC {
    pub n_oscillators: usize,
    pub positions: Array1<f64>,
    pub velocities: Array1<f64>,
    pub coupling_matrix: Array2<f64>,
    pub drive_frequency: f64,
    pub drive_amplitude: f64,
    pub coupling_strength: f64,
    pub dissipation: f64,
    pub dt: f64,
    pub time: f64,
}
|
||||
|
||||
impl SimdDTC {
    /// Create new SIMD-optimized DTC with all oscillators at rest
    /// (zero positions and velocities).
    pub fn new(
        n_oscillators: usize,
        coupling_matrix: Array2<f64>,
        drive_frequency: f64,
        drive_amplitude: f64,
        coupling_strength: f64,
        dissipation: f64,
        dt: f64,
    ) -> Self {
        Self {
            n_oscillators,
            positions: Array1::zeros(n_oscillators),
            velocities: Array1::zeros(n_oscillators),
            coupling_matrix,
            drive_frequency,
            drive_amplitude,
            coupling_strength,
            dissipation,
            dt,
            time: 0.0,
        }
    }

    /// SIMD-optimized step using vectorized operations.
    ///
    /// NOTE(review): positions are advanced *before* the velocity update,
    /// so the restoring force `-p` sees the new positions while the
    /// coupling term was computed from the old ones — a semi-implicit
    /// ordering that differs slightly from the scalar implementation,
    /// where both forces are evaluated at the pre-update state. Confirm
    /// this ordering is intended.
    pub fn step_simd(&mut self) {
        let omega = 2.0 * PI * self.drive_frequency;
        let drive = self.drive_amplitude * (omega * self.time).cos();

        // Vectorized coupling computation using matrix multiplication:
        // coupling_forces = J · tanh(positions)
        let activated_positions = self.positions.mapv(|x| x.tanh());
        let coupling_forces = self.coupling_matrix.dot(&activated_positions);

        // Vectorized position update: positions += velocities * dt
        Zip::from(&mut self.positions)
            .and(&self.velocities)
            .for_each(|p, &v| {
                *p += v * self.dt;
            });

        // Vectorized velocity update:
        // velocities += (-positions - dissipation*velocities + coupling + drive) * dt
        Zip::from(&mut self.velocities)
            .and(&self.positions)
            .and(&coupling_forces)
            .for_each(|v, &p, &coupling| {
                let force = -p - self.dissipation * *v + self.coupling_strength * coupling + drive;
                *v += force * self.dt;
            });

        self.time += self.dt;
    }

    /// Run simulation with SIMD optimizations; returns one position
    /// snapshot per integration step (post-step).
    pub fn run_simd(&mut self, duration: f64) -> Vec<Array1<f64>> {
        let n_steps = (duration / self.dt) as usize;
        let mut trajectory = Vec::with_capacity(n_steps);

        for _ in 0..n_steps {
            self.step_simd();
            trajectory.push(self.positions.clone());
        }

        trajectory
    }
}
|
||||
|
||||
/// SIMD-optimized Floquet system using parallel operations
pub struct SimdFloquet {
    /// Number of neurons
    pub n_neurons: usize,
    /// Current firing rates (length n_neurons)
    pub firing_rates: Array1<f64>,
    /// Recurrent weight matrix (expected n_neurons x n_neurons)
    pub weights: Array2<f64>,
    /// Firing-rate time constant (τ in τ dr/dt = -r + tanh(Wr + I))
    pub tau: f64,
    /// Period of the external drive
    pub drive_period: f64,
    /// Amplitude of the external cosine drive
    pub drive_amplitude: f64,
    /// Integration time step
    pub dt: f64,
    /// Current simulation time
    pub time: f64,
    /// Current drive phase, kept in [0, 2π) by `step_simd`
    pub drive_phase: f64,
}
|
||||
|
||||
impl SimdFloquet {
|
||||
pub fn new(
|
||||
n_neurons: usize,
|
||||
weights: Array2<f64>,
|
||||
tau: f64,
|
||||
drive_period: f64,
|
||||
drive_amplitude: f64,
|
||||
dt: f64,
|
||||
) -> Self {
|
||||
Self {
|
||||
n_neurons,
|
||||
firing_rates: Array1::zeros(n_neurons),
|
||||
weights,
|
||||
tau,
|
||||
drive_period,
|
||||
drive_amplitude,
|
||||
dt,
|
||||
time: 0.0,
|
||||
drive_phase: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// SIMD-optimized step using vectorized matrix operations
|
||||
pub fn step_simd(&mut self) {
|
||||
// Vectorized external input computation
|
||||
let phase_offsets = Array1::from_vec(
|
||||
(0..self.n_neurons)
|
||||
.map(|i| 2.0 * PI * i as f64 / self.n_neurons as f64)
|
||||
.collect(),
|
||||
);
|
||||
let external_inputs =
|
||||
phase_offsets.mapv(|offset| self.drive_amplitude * (self.drive_phase + offset).cos());
|
||||
|
||||
// Vectorized recurrent input: W * r
|
||||
let recurrent_inputs = self.weights.dot(&self.firing_rates);
|
||||
|
||||
// Vectorized firing rate update
|
||||
// τ dr/dt = -r + tanh(Wr + I)
|
||||
Zip::from(&mut self.firing_rates)
|
||||
.and(&recurrent_inputs)
|
||||
.and(&external_inputs)
|
||||
.for_each(|r, &rec, &ext| {
|
||||
let derivative = (-*r + (rec + ext).tanh()) / self.tau;
|
||||
*r += derivative * self.dt;
|
||||
});
|
||||
|
||||
self.time += self.dt;
|
||||
self.drive_phase = (2.0 * PI * self.time / self.drive_period) % (2.0 * PI);
|
||||
}
|
||||
|
||||
/// Run SIMD-optimized simulation
|
||||
pub fn run_simd(&mut self, n_periods: usize) -> Vec<Array1<f64>> {
|
||||
let steps_per_period = (self.drive_period / self.dt) as usize;
|
||||
let total_steps = steps_per_period * n_periods;
|
||||
let mut trajectory = Vec::with_capacity(total_steps);
|
||||
|
||||
for _ in 0..total_steps {
|
||||
self.step_simd();
|
||||
trajectory.push(self.firing_rates.clone());
|
||||
}
|
||||
|
||||
trajectory
|
||||
}
|
||||
}
|
||||
|
||||
/// Novel: Hierarchical Time Crystal with Period Multiplication
/// Instead of period-doubling (2x), explores period-tripling, quadrupling, etc.
pub struct HierarchicalTimeCrystal {
    /// Number of hierarchy levels
    pub n_levels: usize,
    /// Number of oscillators at each level
    pub oscillators_per_level: usize,
    /// Per-level position vectors (each of length oscillators_per_level)
    pub levels: Vec<Array1<f64>>,
    /// Drive frequency of level k: base_frequency / (k + 1)
    pub level_frequencies: Vec<f64>,
    /// Coupling over ALL oscillators, indexed level-major
    /// (total = n_levels * oscillators_per_level)
    pub coupling_matrix: Array2<f64>,
    /// Integration time step
    pub dt: f64,
    /// Current simulation time
    pub time: f64,
}
|
||||
|
||||
impl HierarchicalTimeCrystal {
|
||||
/// Create hierarchical time crystal with period multiplication
|
||||
/// Each level oscillates at frequency f/k for k = 1, 2, 3, ...
|
||||
pub fn new(
|
||||
n_levels: usize,
|
||||
oscillators_per_level: usize,
|
||||
base_frequency: f64,
|
||||
dt: f64,
|
||||
) -> Self {
|
||||
let total_oscillators = n_levels * oscillators_per_level;
|
||||
|
||||
// Each level has a different frequency: f, f/2, f/3, f/4, ...
|
||||
let level_frequencies: Vec<f64> = (0..n_levels)
|
||||
.map(|k| base_frequency / (k + 1) as f64)
|
||||
.collect();
|
||||
|
||||
// Initialize each level
|
||||
let levels: Vec<Array1<f64>> = (0..n_levels)
|
||||
.map(|_| Array1::zeros(oscillators_per_level))
|
||||
.collect();
|
||||
|
||||
// Hierarchical coupling: levels couple to neighbors
|
||||
let mut coupling_matrix = Array2::zeros((total_oscillators, total_oscillators));
|
||||
|
||||
// Inter-level coupling
|
||||
for level in 0..n_levels {
|
||||
for i in 0..oscillators_per_level {
|
||||
let idx = level * oscillators_per_level + i;
|
||||
|
||||
// Couple to next level if it exists
|
||||
if level + 1 < n_levels {
|
||||
for j in 0..oscillators_per_level {
|
||||
let next_idx = (level + 1) * oscillators_per_level + j;
|
||||
coupling_matrix[[idx, next_idx]] = 0.3;
|
||||
coupling_matrix[[next_idx, idx]] = 0.3;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
n_levels,
|
||||
oscillators_per_level,
|
||||
levels,
|
||||
level_frequencies,
|
||||
coupling_matrix,
|
||||
dt,
|
||||
time: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Update with period multiplication dynamics
|
||||
pub fn step(&mut self) {
|
||||
let total_oscillators = self.n_levels * self.oscillators_per_level;
|
||||
let mut all_positions = Array1::zeros(total_oscillators);
|
||||
|
||||
// Gather all positions
|
||||
for (level, positions) in self.levels.iter().enumerate() {
|
||||
for i in 0..self.oscillators_per_level {
|
||||
all_positions[level * self.oscillators_per_level + i] = positions[i];
|
||||
}
|
||||
}
|
||||
|
||||
// Compute coupling forces
|
||||
let coupling_forces = self.coupling_matrix.dot(&all_positions);
|
||||
|
||||
// Update each level with its own frequency
|
||||
for (level, positions) in self.levels.iter_mut().enumerate() {
|
||||
let omega = 2.0 * PI * self.level_frequencies[level];
|
||||
let drive = (omega * self.time).cos();
|
||||
|
||||
for i in 0..self.oscillators_per_level {
|
||||
let idx = level * self.oscillators_per_level + i;
|
||||
let coupling = coupling_forces[idx];
|
||||
|
||||
// Simple harmonic oscillator with coupling and drive
|
||||
positions[i] += (-positions[i] + coupling + drive) * self.dt;
|
||||
}
|
||||
}
|
||||
|
||||
self.time += self.dt;
|
||||
}
|
||||
|
||||
/// Compute hierarchical order parameter
|
||||
/// Measures synchronization across different temporal scales
|
||||
pub fn hierarchical_order_parameter(&self) -> Vec<f64> {
|
||||
self.levels
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(level, positions)| {
|
||||
let n = positions.len();
|
||||
let omega = 2.0 * PI * self.level_frequencies[level];
|
||||
|
||||
let mut sum_real = 0.0;
|
||||
let mut sum_imag = 0.0;
|
||||
|
||||
for &pos in positions {
|
||||
let phase = pos * PI;
|
||||
sum_real += (omega * phase).cos();
|
||||
sum_imag += (omega * phase).sin();
|
||||
}
|
||||
|
||||
((sum_real / n as f64).powi(2) + (sum_imag / n as f64).powi(2)).sqrt()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Novel discovery: Temporal multiplexing capacity
|
||||
/// Number of distinct temporal "slots" available for information encoding
|
||||
pub fn temporal_multiplexing_capacity(&self) -> usize {
|
||||
// Each level provides log2(period_multiplier) bits
|
||||
// Total capacity is sum across levels
|
||||
(1..=self.n_levels)
|
||||
.map(|k| {
|
||||
// Period k provides log2(k) temporal slots
|
||||
(k as f64).log2().ceil() as usize
|
||||
})
|
||||
.sum()
|
||||
}
|
||||
}
|
||||
|
||||
/// Novel: Topological Time Crystal with Protected Edge Modes
/// Inspired by topological insulators - edge states resist perturbations
pub struct TopologicalTimeCrystal {
    /// Number of lattice sites in the chain
    pub n_sites: usize,
    /// Site displacements (length n_sites)
    pub positions: Array1<f64>,
    /// Site velocities (length n_sites)
    pub velocities: Array1<f64>,
    /// SSH-like nearest-neighbour hopping matrix (n_sites x n_sites)
    pub hopping_matrix: Array2<f64>,
    /// Scale factor applied to the boundary bonds (values < 1 weaken them)
    pub edge_protection: f64,
    /// Integration time step
    pub dt: f64,
    /// Current simulation time
    pub time: f64,
}
|
||||
|
||||
impl TopologicalTimeCrystal {
|
||||
/// Create topological time crystal with protected edge modes
|
||||
pub fn new(n_sites: usize, hopping_strength: f64, edge_protection: f64, dt: f64) -> Self {
|
||||
let mut hopping_matrix = Array2::zeros((n_sites, n_sites));
|
||||
|
||||
// SSH-like model: alternating hopping strengths
|
||||
for i in 0..n_sites - 1 {
|
||||
let hop = if i % 2 == 0 {
|
||||
hopping_strength * 1.5 // Strong bond
|
||||
} else {
|
||||
hopping_strength * 0.5 // Weak bond
|
||||
};
|
||||
hopping_matrix[[i, i + 1]] = hop;
|
||||
hopping_matrix[[i + 1, i]] = hop;
|
||||
}
|
||||
|
||||
// Edge protection: reduce coupling at boundaries
|
||||
hopping_matrix[[0, 1]] *= edge_protection;
|
||||
hopping_matrix[[1, 0]] *= edge_protection;
|
||||
hopping_matrix[[n_sites - 2, n_sites - 1]] *= edge_protection;
|
||||
hopping_matrix[[n_sites - 1, n_sites - 2]] *= edge_protection;
|
||||
|
||||
Self {
|
||||
n_sites,
|
||||
positions: Array1::zeros(n_sites),
|
||||
velocities: Array1::zeros(n_sites),
|
||||
hopping_matrix,
|
||||
edge_protection,
|
||||
dt,
|
||||
time: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Evolve with topological protection
|
||||
pub fn step(&mut self) {
|
||||
// Hopping forces
|
||||
let hopping_forces = self.hopping_matrix.dot(&self.positions);
|
||||
|
||||
// Update positions and velocities
|
||||
Zip::from(&mut self.positions)
|
||||
.and(&self.velocities)
|
||||
.for_each(|p, &v| {
|
||||
*p += v * self.dt;
|
||||
});
|
||||
|
||||
Zip::from(&mut self.velocities)
|
||||
.and(&self.positions)
|
||||
.and(&hopping_forces)
|
||||
.for_each(|v, &p, &hop| {
|
||||
*v += (-p + hop) * self.dt;
|
||||
});
|
||||
|
||||
self.time += self.dt;
|
||||
}
|
||||
|
||||
/// Measure edge localization (topological protection metric)
|
||||
pub fn edge_localization(&self) -> f64 {
|
||||
let edge_amplitude = self.positions[0].abs() + self.positions[self.n_sites - 1].abs();
|
||||
let bulk_amplitude: f64 = self
|
||||
.positions
|
||||
.iter()
|
||||
.skip(1)
|
||||
.take(self.n_sites - 2)
|
||||
.map(|&x| x.abs())
|
||||
.sum();
|
||||
|
||||
if bulk_amplitude == 0.0 {
|
||||
1.0
|
||||
} else {
|
||||
edge_amplitude / (edge_amplitude + bulk_amplitude)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: with zero coupling the DTC still integrates, and the
    // trajectory holds one snapshot per step (0.5 s / 0.001 s = 500).
    #[test]
    fn test_simd_dtc() {
        let n = 50;
        let coupling_matrix = Array2::zeros((n, n));
        let mut simd_dtc = SimdDTC::new(n, coupling_matrix, 8.0, 2.0, 0.5, 0.1, 0.001);

        let trajectory = simd_dtc.run_simd(0.5);
        assert_eq!(trajectory.len(), 500);
    }

    // Four levels must yield four order parameters and a strictly positive
    // multiplexing capacity (levels 2..4 each contribute >= 1 bit).
    #[test]
    fn test_hierarchical_time_crystal() {
        let mut htc = HierarchicalTimeCrystal::new(4, 10, 8.0, 0.001);

        for _ in 0..1000 {
            htc.step();
        }

        let order_params = htc.hierarchical_order_parameter();
        assert_eq!(order_params.len(), 4);

        let capacity = htc.temporal_multiplexing_capacity();
        assert!(capacity > 0);
        println!("Temporal multiplexing capacity: {} bits", capacity);
    }

    // Amplitude seeded on the two boundary sites should stay partially
    // localized there after evolution (weak lower bound of 0.1).
    #[test]
    fn test_topological_time_crystal() {
        let mut ttc = TopologicalTimeCrystal::new(20, 1.0, 0.3, 0.001);

        // Initialize edge mode
        ttc.positions[0] = 1.0;
        ttc.positions[19] = 1.0;

        for _ in 0..5000 {
            ttc.step();
        }

        let edge_loc = ttc.edge_localization();
        println!("Edge localization: {}", edge_loc);

        // Edge modes should remain somewhat localized
        assert!(edge_loc > 0.1);
    }
}
|
||||
626
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/temporal_memory.rs
vendored
Normal file
626
vendor/ruvector/examples/exo-ai-2025/research/03-time-crystal-cognition/src/temporal_memory.rs
vendored
Normal file
@@ -0,0 +1,626 @@
|
||||
// Temporal Memory: Time Crystal-Based Working Memory Implementation
|
||||
// Models working memory as a discrete time crystal with limit cycle attractors
|
||||
|
||||
use ndarray::{Array1, Array2};
|
||||
use std::collections::VecDeque;
|
||||
use std::f64::consts::PI;
|
||||
|
||||
/// A single item held in working memory.
#[derive(Clone, Debug)]
pub struct MemoryItem {
    /// Content vector (high-dimensional representation)
    pub content: Array1<f64>,
    /// Simulation time at which the item was encoded
    pub encoded_time: f64,
    /// Strength in [0, 1]; decays during `TemporalMemory::step` and items
    /// falling below 0.1 are forgotten
    pub strength: f64,
}
|
||||
|
||||
/// Time crystal working memory configuration
#[derive(Clone, Debug)]
pub struct TemporalMemoryConfig {
    /// Dimension of memory representations; `encode` rejects items of any
    /// other length
    pub memory_dim: usize,
    /// Number of neurons in prefrontal cortex module (maintenance)
    pub pfc_neurons: usize,
    /// Number of neurons in hippocampal module (encoding/retrieval)
    pub hc_neurons: usize,
    /// Theta oscillation frequency (Hz)
    pub theta_frequency: f64,
    /// Coupling strength between PFC and HC
    pub pfc_hc_coupling: f64,
    /// Metabolic energy supply rate (per simulated second)
    pub energy_rate: f64,
    /// Dissipation rate (drains energy each step)
    pub dissipation: f64,
    /// Time step (seconds)
    pub dt: f64,
    /// Maximum working memory capacity; oldest item is evicted beyond this
    pub max_capacity: usize,
}
|
||||
|
||||
impl Default for TemporalMemoryConfig {
    /// Biologically-motivated defaults: 8 Hz theta drive, 200 PFC and
    /// 100 HC neurons, 64-dimensional items, and a capacity of 4 items.
    fn default() -> Self {
        Self {
            memory_dim: 64,
            pfc_neurons: 200,
            hc_neurons: 100,
            theta_frequency: 8.0, // theta band
            pfc_hc_coupling: 0.5,
            energy_rate: 1.0,
            dissipation: 0.1,
            dt: 0.001,
            max_capacity: 4, // Miller's 4±1
        }
    }
}
|
||||
|
||||
/// Time crystal working memory system
pub struct TemporalMemory {
    config: TemporalMemoryConfig,
    /// PFC neural population (maintenance)
    pfc_neurons: Array1<f64>,
    /// HC neural population (encoding/retrieval)
    hc_neurons: Array1<f64>,
    /// PFC recurrent weights (asymmetric, to enable limit cycles)
    pfc_weights: Array2<f64>,
    /// HC recurrent weights (symmetric, for stable attractors)
    hc_weights: Array2<f64>,
    /// PFC→HC coupling weights, shape (hc_neurons, pfc_neurons)
    pfc_to_hc: Array2<f64>,
    /// HC→PFC coupling weights — transpose of `pfc_to_hc`
    hc_to_pfc: Array2<f64>,
    /// Stored memory items, oldest at the front (FIFO eviction)
    memory_items: VecDeque<MemoryItem>,
    /// Current simulation time
    time: f64,
    /// Metabolic energy level, clamped to [0, 2] by `step`
    energy: f64,
    /// Time crystal order parameter history (one M_2 value per step)
    order_parameter_history: Vec<f64>,
}
|
||||
|
||||
impl TemporalMemory {
    /// Create new time crystal working memory.
    ///
    /// Builds random sparse weight matrices: asymmetric PFC recurrent
    /// weights (support limit cycles), symmetric HC weights (content
    /// storage), and sparse PFC↔HC coupling where the HC→PFC direction is
    /// the transpose of PFC→HC.
    pub fn new(config: TemporalMemoryConfig) -> Self {
        let pfc_neurons = Array1::zeros(config.pfc_neurons);
        let hc_neurons = Array1::zeros(config.hc_neurons);

        // Asymmetric weights for PFC (enable limit cycles)
        let pfc_weights = Self::generate_limit_cycle_weights(config.pfc_neurons, 0.3, 1.0);

        // Symmetric weights for HC (content storage)
        let hc_weights = Self::generate_symmetric_weights(config.hc_neurons, 0.2, 0.8);

        // Coupling weights; result has shape (hc_neurons, pfc_neurons)
        let pfc_to_hc = Self::generate_coupling_weights(
            config.pfc_neurons,
            config.hc_neurons,
            config.pfc_hc_coupling,
        );
        let hc_to_pfc = pfc_to_hc.t().to_owned();

        Self {
            config,
            pfc_neurons,
            hc_neurons,
            pfc_weights,
            hc_weights,
            pfc_to_hc,
            hc_to_pfc,
            memory_items: VecDeque::new(),
            time: 0.0,
            energy: 1.0,
            order_parameter_history: Vec::new(),
        }
    }

    /// Generate asymmetric weights that support limit cycles.
    ///
    /// Each off-diagonal entry is independently non-zero with probability
    /// `sparsity`, drawn uniformly from (-strength, strength); W_ij and
    /// W_ji are drawn independently, so the matrix is asymmetric.
    fn generate_limit_cycle_weights(n: usize, sparsity: f64, strength: f64) -> Array2<f64> {
        let mut weights = Array2::zeros((n, n));
        let mut rng = rand::thread_rng();

        use rand::Rng;
        for i in 0..n {
            for j in 0..n {
                if i != j && rng.gen::<f64>() < sparsity {
                    // Asymmetric: W_ij != W_ji
                    weights[[i, j]] = rng.gen_range(-strength..strength);
                }
            }
        }

        weights
    }

    /// Generate symmetric weights for content storage.
    ///
    /// Only the upper triangle is sampled (probability `sparsity`, value
    /// uniform in [0, strength)) and mirrored, so W_ij == W_ji and the
    /// diagonal stays zero.
    fn generate_symmetric_weights(n: usize, sparsity: f64, strength: f64) -> Array2<f64> {
        let mut weights = Array2::zeros((n, n));
        let mut rng = rand::thread_rng();

        use rand::Rng;
        for i in 0..n {
            for j in i + 1..n {
                if rng.gen::<f64>() < sparsity {
                    let w = rng.gen_range(0.0..strength);
                    weights[[i, j]] = w;
                    weights[[j, i]] = w; // Symmetric
                }
            }
        }

        weights
    }

    /// Generate sparse coupling weights between modules.
    ///
    /// NOTE: the returned matrix has shape (n_to, n_from) so it multiplies
    /// a `from`-module activity vector on the right. The coupling density
    /// is a fixed 10%, independent of `strength`.
    fn generate_coupling_weights(n_from: usize, n_to: usize, strength: f64) -> Array2<f64> {
        let mut weights = Array2::zeros((n_to, n_from));
        let mut rng = rand::thread_rng();

        use rand::Rng;
        for i in 0..n_to {
            for j in 0..n_from {
                if rng.gen::<f64>() < 0.1 {
                    // Sparse coupling
                    weights[[i, j]] = rng.gen_range(-strength..strength);
                }
            }
        }

        weights
    }

    /// Theta oscillation (periodic drive): cos(2π · f_theta · t).
    fn theta_drive(&self) -> f64 {
        let omega = 2.0 * PI * self.config.theta_frequency;
        (omega * self.time).cos()
    }

    /// Encode a new item into working memory.
    ///
    /// When the store is full the OLDEST item is evicted first (FIFO);
    /// despite the earlier comment, eviction is not strength-based.
    /// The new item starts at full strength and immediately activates the
    /// HC population.
    ///
    /// # Errors
    /// Returns an error if `item.len() != config.memory_dim`.
    pub fn encode(&mut self, item: Array1<f64>) -> Result<(), &'static str> {
        if item.len() != self.config.memory_dim {
            return Err("Item dimension mismatch");
        }

        if self.memory_items.len() >= self.config.max_capacity {
            // At capacity — evict the oldest item (front of the deque).
            self.memory_items.pop_front();
        }

        let memory_item = MemoryItem {
            content: item.clone(),
            encoded_time: self.time,
            strength: 1.0,
        };

        self.memory_items.push_back(memory_item);

        // Activate HC neurons for encoding
        self.activate_hc_for_item(&item);

        Ok(())
    }

    /// Activate HC neurons to represent an item.
    ///
    /// Each HC neuron i copies component (i mod memory_dim) of the item
    /// through tanh, plus small uniform noise — a cyclic projection into
    /// the HC space.
    fn activate_hc_for_item(&mut self, item: &Array1<f64>) {
        // Project item into HC neural space
        // Simple approach: sample neurons proportional to item components
        let mut rng = rand::thread_rng();
        use rand::Rng;

        for i in 0..self.config.hc_neurons {
            let idx = i % item.len();
            self.hc_neurons[i] = item[idx].tanh() + rng.gen_range(-0.1..0.1);
        }
    }

    /// Retrieve the best-matching item from working memory.
    ///
    /// The match score is cosine similarity weighted by item strength; a
    /// result is returned only when the best score exceeds 0.5. Returns
    /// `None` for a query of the wrong dimension.
    pub fn retrieve(&self, query: &Array1<f64>) -> Option<MemoryItem> {
        if query.len() != self.config.memory_dim {
            return None;
        }

        // Find item with highest similarity
        let mut best_match = None;
        let mut best_similarity = -1.0;

        for item in &self.memory_items {
            let similarity = cosine_similarity(query, &item.content) * item.strength;
            if similarity > best_similarity {
                best_similarity = similarity;
                best_match = Some(item.clone());
            }
        }

        if best_similarity > 0.5 {
            best_match
        } else {
            None
        }
    }

    /// Evolve the neural dynamics by one time step (time crystal
    /// maintenance): update the energy budget, run the PFC/HC dynamics (or
    /// let them decay when energy is critically low), decay and prune
    /// memory items, and record the order parameter M_2.
    pub fn step(&mut self) {
        let theta = self.theta_drive();

        // Update energy (metabolic supply - dissipation), clamped to [0, 2]
        let energy_cost = self.compute_energy_cost();
        self.energy +=
            (self.config.energy_rate - energy_cost - self.config.dissipation) * self.config.dt;
        self.energy = self.energy.clamp(0.0, 2.0);

        // If energy too low (< 0.2), the time crystal collapses: activity decays
        if self.energy < 0.2 {
            self.pfc_neurons *= 0.9; // Decay
            self.hc_neurons *= 0.9;
        } else {
            // Update PFC (working memory maintenance via limit cycle)
            self.update_pfc(theta);

            // Update HC (content representation)
            self.update_hc(theta);
        }

        // Decay memory strengths
        for item in &mut self.memory_items {
            item.strength *= 0.9999; // Slow decay
            if (self.time - item.encoded_time) > 10.0 {
                item.strength *= 0.99; // Faster decay for items older than 10 s
            }
        }

        // Remove forgotten items (strength <= 0.1)
        self.memory_items.retain(|item| item.strength > 0.1);

        // Compute and store order parameter (k = 2: period-doubled response)
        let m_k = self.compute_order_parameter(2);
        self.order_parameter_history.push(m_k);

        self.time += self.config.dt;
    }

    /// Update PFC neurons (limit-cycle dynamics for maintenance):
    /// dr/dt = -r + tanh(W_pfc·r + W_hc→pfc·hc + 0.5·θ(t)), Euler-stepped.
    fn update_pfc(&mut self, theta_drive: f64) {
        let n = self.config.pfc_neurons;
        let mut updates = Array1::zeros(n);

        for i in 0..n {
            // Recurrent input (asymmetric → limit cycles)
            let mut recurrent = 0.0;
            for j in 0..n {
                recurrent += self.pfc_weights[[i, j]] * self.pfc_neurons[j];
            }

            // Input from HC
            let mut hc_input = 0.0;
            for j in 0..self.config.hc_neurons {
                hc_input += self.hc_to_pfc[[i, j]] * self.hc_neurons[j];
            }

            // Theta drive
            let drive = theta_drive * 0.5;

            // Neural dynamics: dr/dt = -r + tanh(W*r + I + θ)
            let total_input = recurrent + hc_input + drive;
            updates[i] = (-self.pfc_neurons[i] + total_input.tanh()) * self.config.dt;
        }

        self.pfc_neurons += &updates;
    }

    /// Update HC neurons (content representation). Inputs are multiplied
    /// by a theta-phase gain (1 ± 0.3) before the tanh nonlinearity.
    fn update_hc(&mut self, theta_drive: f64) {
        let n = self.config.hc_neurons;
        let mut updates = Array1::zeros(n);

        for i in 0..n {
            // Recurrent input (symmetric → stable attractors)
            let mut recurrent = 0.0;
            for j in 0..n {
                recurrent += self.hc_weights[[i, j]] * self.hc_neurons[j];
            }

            // Input from PFC
            let mut pfc_input = 0.0;
            for j in 0..self.config.pfc_neurons {
                pfc_input += self.pfc_to_hc[[i, j]] * self.pfc_neurons[j];
            }

            // Theta modulation (gain between 0.7 and 1.3)
            let modulation = 1.0 + 0.3 * theta_drive;

            // Neural dynamics
            let total_input = (recurrent + pfc_input) * modulation;
            updates[i] = (-self.hc_neurons[i] + total_input.tanh()) * self.config.dt;
        }

        self.hc_neurons += &updates;
    }

    /// Compute energy cost: the summed |activity| of both populations,
    /// scaled by 0.001.
    fn compute_energy_cost(&self) -> f64 {
        let pfc_activity = self.pfc_neurons.mapv(|x| x.abs()).sum();
        let hc_activity = self.hc_neurons.mapv(|x| x.abs()).sum();
        (pfc_activity + hc_activity) * 0.001
    }

    /// Compute the time crystal order parameter M_k over the PFC
    /// population: |⟨e^{i·k·ω₀·φ}⟩|, where each neuron's activity r is
    /// mapped to a phase proxy φ = r·π/2 (a heuristic mapping, not a true
    /// oscillator phase).
    fn compute_order_parameter(&self, k: usize) -> f64 {
        let omega_0 = 2.0 * PI * self.config.theta_frequency;
        let n = self.config.pfc_neurons;

        // Phases of PFC neurons
        let mut sum_real = 0.0;
        let mut sum_imag = 0.0;

        for i in 0..n {
            let phase = self.pfc_neurons[i] * PI / 2.0; // Map activity to phase
            let arg = k as f64 * omega_0 * phase;
            sum_real += arg.cos();
            sum_imag += arg.sin();
        }

        ((sum_real / n as f64).powi(2) + (sum_imag / n as f64).powi(2)).sqrt()
    }

    /// Number of items currently held.
    pub fn current_capacity(&self) -> usize {
        self.memory_items.len()
    }

    /// Mean strength over held items; 0.0 when the store is empty.
    pub fn average_strength(&self) -> f64 {
        if self.memory_items.is_empty() {
            0.0
        } else {
            self.memory_items
                .iter()
                .map(|item| item.strength)
                .sum::<f64>()
                / self.memory_items.len() as f64
        }
    }

    /// Check whether the system is in the time-crystal phase: the mean of
    /// the most recent 100 order-parameter samples must exceed 0.5.
    /// Always `false` before 100 steps have been recorded.
    pub fn is_time_crystal_phase(&self) -> bool {
        if self.order_parameter_history.len() < 100 {
            return false;
        }

        // Average recent order parameter
        let recent: Vec<f64> = self
            .order_parameter_history
            .iter()
            .rev()
            .take(100)
            .cloned()
            .collect();

        let avg_m_k = recent.iter().sum::<f64>() / recent.len() as f64;

        // Time-crystal phase if M_k > 0.5
        avg_m_k > 0.5
    }

    /// Snapshot of the current working-memory statistics.
    pub fn get_stats(&self) -> MemoryStats {
        MemoryStats {
            capacity: self.current_capacity(),
            max_capacity: self.config.max_capacity,
            avg_strength: self.average_strength(),
            energy: self.energy,
            is_time_crystal: self.is_time_crystal_phase(),
            order_parameter: self.order_parameter_history.last().cloned().unwrap_or(0.0),
        }
    }
}
|
||||
|
||||
/// Snapshot of working-memory statistics (see `TemporalMemory::get_stats`).
#[derive(Clone, Debug)]
pub struct MemoryStats {
    /// Number of items currently held
    pub capacity: usize,
    /// Configured maximum number of items
    pub max_capacity: usize,
    /// Mean strength over held items (0.0 when empty)
    pub avg_strength: f64,
    /// Current metabolic energy level (clamped to [0, 2])
    pub energy: f64,
    /// Whether the recent order parameter indicates a time-crystal phase
    pub is_time_crystal: bool,
    /// Most recent order parameter M_2 (0.0 if none recorded)
    pub order_parameter: f64,
}
|
||||
|
||||
impl std::fmt::Display for MemoryStats {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"WM: {}/{} items | Strength: {:.2} | Energy: {:.2} | Time Crystal: {} | M_2: {:.3}",
|
||||
self.capacity,
|
||||
self.max_capacity,
|
||||
self.avg_strength,
|
||||
self.energy,
|
||||
if self.is_time_crystal { "YES" } else { "NO " },
|
||||
self.order_parameter
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Cosine similarity between vectors
|
||||
fn cosine_similarity(a: &Array1<f64>, b: &Array1<f64>) -> f64 {
|
||||
let dot = a.dot(b);
|
||||
let norm_a = a.dot(a).sqrt();
|
||||
let norm_b = b.dot(b).sqrt();
|
||||
|
||||
if norm_a == 0.0 || norm_b == 0.0 {
|
||||
0.0
|
||||
} else {
|
||||
dot / (norm_a * norm_b)
|
||||
}
|
||||
}
|
||||
|
||||
/// Working memory task simulator (delayed match-to-sample protocol).
pub struct WorkingMemoryTask {
    /// The time-crystal memory under test
    memory: TemporalMemory,
    /// Sequence of items to remember
    items: Vec<Array1<f64>>,
    /// Retrieval queries (exact copies of the items — exact-recall protocol)
    queries: Vec<Array1<f64>>,
    /// Accuracy history, one entry per completed trial
    accuracy_history: Vec<f64>,
}
|
||||
|
||||
impl WorkingMemoryTask {
|
||||
pub fn new(config: TemporalMemoryConfig, n_items: usize, memory_dim: usize) -> Self {
|
||||
let mut rng = rand::thread_rng();
|
||||
use rand::Rng;
|
||||
|
||||
// Generate random items
|
||||
let items: Vec<Array1<f64>> = (0..n_items)
|
||||
.map(|_| Array1::from_vec((0..memory_dim).map(|_| rng.gen_range(-1.0..1.0)).collect()))
|
||||
.collect();
|
||||
|
||||
// Queries are same as items (exact recall)
|
||||
let queries = items.clone();
|
||||
|
||||
Self {
|
||||
memory: TemporalMemory::new(config),
|
||||
items,
|
||||
queries,
|
||||
accuracy_history: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Run delayed match-to-sample task
|
||||
pub fn run_delayed_match_to_sample(&mut self, encoding_duration: f64, delay_duration: f64) {
|
||||
let dt = self.memory.config.dt;
|
||||
|
||||
// Encoding phase: present items sequentially
|
||||
for item in &self.items {
|
||||
self.memory.encode(item.clone()).unwrap();
|
||||
|
||||
// Maintenance during encoding
|
||||
let encoding_steps = (encoding_duration / dt) as usize;
|
||||
for _ in 0..encoding_steps {
|
||||
self.memory.step();
|
||||
}
|
||||
}
|
||||
|
||||
// Delay phase: maintain without input
|
||||
let delay_steps = (delay_duration / dt) as usize;
|
||||
for _ in 0..delay_steps {
|
||||
self.memory.step();
|
||||
}
|
||||
|
||||
// Retrieval phase: test recall
|
||||
let mut correct = 0;
|
||||
for query in self.queries.iter() {
|
||||
if let Some(retrieved) = self.memory.retrieve(query) {
|
||||
let similarity = cosine_similarity(query, &retrieved.content);
|
||||
if similarity > 0.8 {
|
||||
correct += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let accuracy = correct as f64 / self.queries.len() as f64;
|
||||
self.accuracy_history.push(accuracy);
|
||||
}
|
||||
|
||||
/// Get performance metrics
|
||||
pub fn get_performance(&self) -> (f64, bool) {
|
||||
let accuracy = self.accuracy_history.last().cloned().unwrap_or(0.0);
|
||||
let is_time_crystal = self.memory.is_time_crystal_phase();
|
||||
(accuracy, is_time_crystal)
|
||||
}
|
||||
|
||||
/// Print summary
|
||||
pub fn print_summary(&self) {
|
||||
let (accuracy, is_tc) = self.get_performance();
|
||||
let stats = self.memory.get_stats();
|
||||
|
||||
println!("\n=== Working Memory Task Summary ===");
|
||||
println!("Accuracy: {:.1}%", accuracy * 100.0);
|
||||
println!("{}", stats);
|
||||
println!("Time Crystal enables accurate working memory: {}", is_tc);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh memory starts empty.
    #[test]
    fn test_temporal_memory_creation() {
        let config = TemporalMemoryConfig::default();
        let memory = TemporalMemory::new(config);
        assert_eq!(memory.current_capacity(), 0);
    }

    // Encode one item, maintain for 1 s, and check it is still retrievable.
    #[test]
    fn test_encode_retrieve() {
        // NOTE(review): `mut` on config is unnecessary — it is never mutated.
        let mut config = TemporalMemoryConfig::default();
        let mut memory = TemporalMemory::new(config);

        // Create item with correct dimension (64, the default memory_dim)
        let item = Array1::from_vec(vec![1.0; 64]);
        memory.encode(item.clone()).unwrap();

        // Maintain for a while (1000 steps = 1 s at dt = 0.001)
        for _ in 0..1000 {
            memory.step();
        }

        let retrieved = memory.retrieve(&item);
        assert!(retrieved.is_some());
    }

    // End-to-end delayed match-to-sample: recall should beat a weak floor.
    #[test]
    fn test_working_memory_task() {
        let config = TemporalMemoryConfig::default();
        let mut task = WorkingMemoryTask::new(config, 3, 64);

        task.run_delayed_match_to_sample(0.5, 1.0);

        let (accuracy, _) = task.get_performance();
        println!("Task accuracy: {:.1}%", accuracy * 100.0);

        // With time crystal dynamics, accuracy should be reasonable
        assert!(accuracy > 0.3); // At least 30% recall
    }

    // Encoding past max_capacity evicts the OLDEST items (FIFO), so only
    // the 3 most recent survive.
    #[test]
    fn test_capacity_limit() {
        let mut config = TemporalMemoryConfig::default();
        config.max_capacity = 3;

        let mut memory = TemporalMemory::new(config);

        // Encode 5 items (exceeds capacity)
        for i in 0..5 {
            let item = Array1::from_vec(vec![i as f64; 64]);
            memory.encode(item).unwrap();
        }

        // Should only keep 3 most recent
        assert_eq!(memory.current_capacity(), 3);
    }

    // After many theta cycles the order-parameter history must be long
    // enough for the phase test (the phase itself is stochastic, so only
    // the history length is asserted).
    #[test]
    fn test_time_crystal_phase() {
        let mut config = TemporalMemoryConfig::default();
        config.pfc_neurons = 100;

        let mut memory = TemporalMemory::new(config);

        // Encode item and maintain
        let item = Array1::from_vec(vec![1.0; 64]);
        memory.encode(item).unwrap();

        // Run for several theta cycles (10000 steps = 10 s)
        for _ in 0..10000 {
            memory.step();
        }

        // Check if time crystal phase emerged
        let is_tc = memory.is_time_crystal_phase();
        println!("Time crystal phase: {}", is_tc);

        // Should have some order parameter history
        assert!(memory.order_parameter_history.len() > 100);
    }
}
|
||||
Reference in New Issue
Block a user