feat: Training mode, ADR docs, vitals and wifiscan crates
- Add --train CLI flag with dataset loading, graph transformer training, cosine-scheduled SGD, PCK/OKS validation, and checkpoint saving
- Refactor main.rs to import training modules from lib.rs instead of duplicating mod declarations
- Add ADR-021 (vital sign detection), ADR-022 (Windows WiFi enhanced fidelity), ADR-023 (trained DensePose pipeline) documentation
- Add wifi-densepose-vitals crate: breathing, heartrate, anomaly detection, preprocessor, and temporal store
- Add wifi-densepose-wifiscan crate: 8-stage signal intelligence pipeline with netsh/wlanapi adapters, multi-BSSID registry, attention weighting, spatial correlation, and breathing extraction

Co-Authored-By: claude-flow <ruv@ruv.net>
This commit is contained in:
@@ -745,4 +745,94 @@ mod tests {
|
||||
assert!((sum - 1.0).abs() < 1e-5);
|
||||
for &wi in &w3 { assert!(wi.is_finite()); }
|
||||
}
|
||||
|
||||
// ── Weight serialization integration tests ────────────────────────
|
||||
|
||||
#[test]
fn linear_flatten_unflatten_roundtrip() {
    // A Linear layer's parameters, once flattened, must rebuild an
    // identical layer from the flat buffer.
    let original = Linear::with_seed(8, 4, 42);
    let mut buffer = Vec::new();
    original.flatten_into(&mut buffer);
    assert_eq!(buffer.len(), original.param_count());

    let (rebuilt, used) = Linear::unflatten_from(&buffer, 8, 4);
    assert_eq!(used, buffer.len());

    // Identical parameters imply bit-identical outputs on any input.
    let input = vec![1.0f32; 8];
    assert_eq!(original.forward(&input), rebuilt.forward(&input));
}
|
||||
|
||||
#[test]
fn cross_attention_flatten_unflatten_roundtrip() {
    // Serialize a CrossAttention module into a flat buffer, restore it,
    // and check both instances agree on a forward pass.
    let attn = CrossAttention::new(16, 4);
    let mut buffer = Vec::new();
    attn.flatten_into(&mut buffer);
    assert_eq!(buffer.len(), attn.param_count());

    let (rebuilt, used) = CrossAttention::unflatten_from(&buffer, 16, 4);
    assert_eq!(used, buffer.len());

    // 3 query rows attending over 5 key/value rows.
    let queries = vec![vec![0.5f32; 16]; 3];
    let keys = vec![vec![0.3f32; 16]; 5];
    let values = vec![vec![0.7f32; 16]; 5];
    let expected = attn.forward(&queries, &keys, &values);
    let actual = rebuilt.forward(&queries, &keys, &values);

    // Element-wise agreement within float tolerance.
    for (row_e, row_a) in expected.iter().zip(actual.iter()) {
        for (x, y) in row_e.iter().zip(row_a.iter()) {
            assert!((x - y).abs() < 1e-6, "mismatch: {x} vs {y}");
        }
    }
}
|
||||
|
||||
#[test]
fn transformer_weight_roundtrip() {
    // Copy the flattened weights of one transformer into a second one
    // built from the same config; both must then predict identically.
    let config = TransformerConfig {
        n_subcarriers: 16, n_keypoints: 17, d_model: 8, n_heads: 2, n_gnn_layers: 1,
    };
    let source = CsiToPoseTransformer::new(config.clone());
    let weights = source.flatten_weights();
    assert_eq!(weights.len(), source.param_count());

    let mut target = CsiToPoseTransformer::new(config);
    target.unflatten_weights(&weights).expect("unflatten should succeed");

    // Forward pass should produce identical results
    let csi = vec![vec![0.5f32; 16]; 4];
    let out_src = source.forward(&csi);
    let out_tgt = target.forward(&csi);
    for (kp_a, kp_b) in out_src.keypoints.iter().zip(out_tgt.keypoints.iter()) {
        assert!((kp_a.0 - kp_b.0).abs() < 1e-6);
        assert!((kp_a.1 - kp_b.1).abs() < 1e-6);
        assert!((kp_a.2 - kp_b.2).abs() < 1e-6);
    }
    for (c_a, c_b) in out_src.confidences.iter().zip(out_tgt.confidences.iter()) {
        assert!((c_a - c_b).abs() < 1e-6);
    }
}
|
||||
|
||||
#[test]
fn transformer_param_count_positive() {
    // A default-config transformer must report a substantial parameter
    // count, and flattening must yield exactly that many values.
    let model = CsiToPoseTransformer::new(TransformerConfig::default());
    let n_params = model.param_count();
    assert!(n_params > 1000, "expected many params, got {}", n_params);
    assert_eq!(model.flatten_weights().len(), n_params);
}
|
||||
|
||||
#[test]
fn gnn_stack_flatten_unflatten() {
    // Round-trip GNN stack weights through a flat buffer into a second
    // stack built over the same body graph.
    let graph = BodyGraph::new();
    let stack_a = GnnStack::new(8, 8, 2, &graph);
    let mut buffer = Vec::new();
    stack_a.flatten_into(&mut buffer);
    assert_eq!(buffer.len(), stack_a.param_count());

    let mut stack_b = GnnStack::new(8, 8, 2, &graph);
    assert_eq!(stack_b.unflatten_from(&buffer), buffer.len());

    // Same weights + same 17-node input => same per-node features.
    let features = vec![vec![1.0f32; 8]; 17];
    let out_a = stack_a.forward(&features);
    let out_b = stack_b.forward(&features);
    for (row_a, row_b) in out_a.iter().zip(out_b.iter()) {
        for (x, y) in row_a.iter().zip(row_b.iter()) {
            assert!((x - y).abs() < 1e-6);
        }
    }
}
|
||||
}
|
||||
|
||||
@@ -11,11 +11,9 @@
|
||||
mod rvf_container;
|
||||
mod rvf_pipeline;
|
||||
mod vital_signs;
|
||||
mod graph_transformer;
|
||||
mod trainer;
|
||||
mod dataset;
|
||||
mod sparse_inference;
|
||||
mod sona;
|
||||
|
||||
// Training pipeline modules (exposed via lib.rs)
|
||||
use wifi_densepose_sensing_server::{graph_transformer, trainer, dataset};
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::net::SocketAddr;
|
||||
@@ -1538,6 +1536,169 @@ async fn main() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle --train mode: train a model and exit
|
||||
if args.train {
|
||||
eprintln!("=== WiFi-DensePose Training Mode ===");
|
||||
|
||||
// Build data pipeline
|
||||
let ds_path = args.dataset.clone().unwrap_or_else(|| PathBuf::from("data"));
|
||||
let source = match args.dataset_type.as_str() {
|
||||
"wipose" => dataset::DataSource::WiPose(ds_path.clone()),
|
||||
_ => dataset::DataSource::MmFi(ds_path.clone()),
|
||||
};
|
||||
let pipeline = dataset::DataPipeline::new(dataset::DataConfig {
|
||||
source,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
// Load samples
|
||||
let samples = match pipeline.load() {
|
||||
Ok(s) if !s.is_empty() => {
|
||||
eprintln!("Loaded {} samples from {}", s.len(), ds_path.display());
|
||||
s
|
||||
}
|
||||
Ok(_) => {
|
||||
eprintln!("No samples found at {}. Generating synthetic training data...", ds_path.display());
|
||||
// Generate synthetic samples for testing the pipeline
|
||||
let mut synth = Vec::new();
|
||||
for i in 0..50 {
|
||||
let csi: Vec<Vec<f32>> = (0..4).map(|a| {
|
||||
(0..56).map(|s| ((i * 7 + a * 13 + s) as f32 * 0.31).sin() * 0.5).collect()
|
||||
}).collect();
|
||||
let mut kps = [(0.0f32, 0.0f32, 1.0f32); 17];
|
||||
for (k, kp) in kps.iter_mut().enumerate() {
|
||||
kp.0 = (k as f32 * 0.1 + i as f32 * 0.02).sin() * 100.0 + 320.0;
|
||||
kp.1 = (k as f32 * 0.15 + i as f32 * 0.03).cos() * 80.0 + 240.0;
|
||||
}
|
||||
synth.push(dataset::TrainingSample {
|
||||
csi_window: csi,
|
||||
pose_label: dataset::PoseLabel {
|
||||
keypoints: kps,
|
||||
body_parts: Vec::new(),
|
||||
confidence: 1.0,
|
||||
},
|
||||
source: "synthetic",
|
||||
});
|
||||
}
|
||||
synth
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to load dataset: {e}");
|
||||
eprintln!("Generating synthetic training data...");
|
||||
let mut synth = Vec::new();
|
||||
for i in 0..50 {
|
||||
let csi: Vec<Vec<f32>> = (0..4).map(|a| {
|
||||
(0..56).map(|s| ((i * 7 + a * 13 + s) as f32 * 0.31).sin() * 0.5).collect()
|
||||
}).collect();
|
||||
let mut kps = [(0.0f32, 0.0f32, 1.0f32); 17];
|
||||
for (k, kp) in kps.iter_mut().enumerate() {
|
||||
kp.0 = (k as f32 * 0.1 + i as f32 * 0.02).sin() * 100.0 + 320.0;
|
||||
kp.1 = (k as f32 * 0.15 + i as f32 * 0.03).cos() * 80.0 + 240.0;
|
||||
}
|
||||
synth.push(dataset::TrainingSample {
|
||||
csi_window: csi,
|
||||
pose_label: dataset::PoseLabel {
|
||||
keypoints: kps,
|
||||
body_parts: Vec::new(),
|
||||
confidence: 1.0,
|
||||
},
|
||||
source: "synthetic",
|
||||
});
|
||||
}
|
||||
synth
|
||||
}
|
||||
};
|
||||
|
||||
// Convert dataset samples to trainer format
|
||||
let trainer_samples: Vec<trainer::TrainingSample> = samples.iter()
|
||||
.map(trainer::from_dataset_sample)
|
||||
.collect();
|
||||
|
||||
// Split 80/20 train/val
|
||||
let split = (trainer_samples.len() * 4) / 5;
|
||||
let (train_data, val_data) = trainer_samples.split_at(split.max(1));
|
||||
eprintln!("Train: {} samples, Val: {} samples", train_data.len(), val_data.len());
|
||||
|
||||
// Create transformer + trainer
|
||||
let n_subcarriers = train_data.first()
|
||||
.and_then(|s| s.csi_features.first())
|
||||
.map(|f| f.len())
|
||||
.unwrap_or(56);
|
||||
let tf_config = graph_transformer::TransformerConfig {
|
||||
n_subcarriers,
|
||||
n_keypoints: 17,
|
||||
d_model: 64,
|
||||
n_heads: 4,
|
||||
n_gnn_layers: 2,
|
||||
};
|
||||
let transformer = graph_transformer::CsiToPoseTransformer::new(tf_config);
|
||||
eprintln!("Transformer params: {}", transformer.param_count());
|
||||
|
||||
let trainer_config = trainer::TrainerConfig {
|
||||
epochs: args.epochs,
|
||||
batch_size: 8,
|
||||
lr: 0.001,
|
||||
warmup_epochs: 5,
|
||||
min_lr: 1e-6,
|
||||
early_stop_patience: 20,
|
||||
checkpoint_every: 10,
|
||||
..Default::default()
|
||||
};
|
||||
let mut t = trainer::Trainer::with_transformer(trainer_config, transformer);
|
||||
|
||||
// Run training
|
||||
eprintln!("Starting training for {} epochs...", args.epochs);
|
||||
let result = t.run_training(train_data, val_data);
|
||||
eprintln!("Training complete in {:.1}s", result.total_time_secs);
|
||||
eprintln!(" Best epoch: {}, PCK@0.2: {:.4}, OKS mAP: {:.4}",
|
||||
result.best_epoch, result.best_pck, result.best_oks);
|
||||
|
||||
// Save checkpoint
|
||||
if let Some(ref ckpt_dir) = args.checkpoint_dir {
|
||||
let _ = std::fs::create_dir_all(ckpt_dir);
|
||||
let ckpt_path = ckpt_dir.join("best_checkpoint.json");
|
||||
let ckpt = t.checkpoint();
|
||||
match ckpt.save_to_file(&ckpt_path) {
|
||||
Ok(()) => eprintln!("Checkpoint saved to {}", ckpt_path.display()),
|
||||
Err(e) => eprintln!("Failed to save checkpoint: {e}"),
|
||||
}
|
||||
}
|
||||
|
||||
// Sync weights back to transformer and save as RVF
|
||||
t.sync_transformer_weights();
|
||||
if let Some(ref save_path) = args.save_rvf {
|
||||
eprintln!("Saving trained model to RVF: {}", save_path.display());
|
||||
let weights = t.params().to_vec();
|
||||
let mut builder = RvfBuilder::new();
|
||||
builder.add_manifest(
|
||||
"wifi-densepose-trained",
|
||||
env!("CARGO_PKG_VERSION"),
|
||||
"WiFi DensePose trained model weights",
|
||||
);
|
||||
builder.add_metadata(&serde_json::json!({
|
||||
"training": {
|
||||
"epochs": args.epochs,
|
||||
"best_epoch": result.best_epoch,
|
||||
"best_pck": result.best_pck,
|
||||
"best_oks": result.best_oks,
|
||||
"n_train_samples": train_data.len(),
|
||||
"n_val_samples": val_data.len(),
|
||||
"n_subcarriers": n_subcarriers,
|
||||
"param_count": weights.len(),
|
||||
},
|
||||
}));
|
||||
builder.add_vital_config(&VitalSignConfig::default());
|
||||
builder.add_weights(&weights);
|
||||
match builder.write_to_file(save_path) {
|
||||
Ok(()) => eprintln!("RVF saved ({} params, {} bytes)",
|
||||
weights.len(), weights.len() * 4),
|
||||
Err(e) => eprintln!("Failed to save RVF: {e}"),
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
info!("WiFi-DensePose Sensing Server (Rust + Axum + RuVector)");
|
||||
info!(" HTTP: http://localhost:{}", args.http_port);
|
||||
info!(" WebSocket: ws://localhost:{}/ws/sensing", args.ws_port);
|
||||
@@ -1761,10 +1922,18 @@ async fn main() {
|
||||
"uptime_secs": s.start_time.elapsed().as_secs(),
|
||||
}));
|
||||
builder.add_vital_config(&VitalSignConfig::default());
|
||||
// Save transformer weights if a model is loaded, otherwise empty.
// (The merged diff previously left an extra `builder.add_weights(&[0.0f32; 0])`
// placeholder call and a duplicate `Ok(())` match arm; both removed.)
let weights: Vec<f32> = if s.model_loaded {
    // If we loaded via --model, the progressive loader has the weights.
    // For now, save runtime state placeholder.
    // NOTE(review): this writes a *freshly initialized* transformer's
    // weights, not the loaded model's — confirm against the loader.
    let tf = graph_transformer::CsiToPoseTransformer::new(Default::default());
    tf.flatten_weights()
} else {
    Vec::new()
};
builder.add_weights(&weights);
match builder.write_to_file(save_path) {
    Ok(()) => info!(" RVF saved ({} weight params)", weights.len()),
    Err(e) => error!(" Failed to save RVF: {e}"),
}
}
|
||||
|
||||
@@ -687,4 +687,67 @@ mod tests {
|
||||
assert!(r.speedup > 0.0);
|
||||
assert!(r.accuracy_loss.is_finite());
|
||||
}
|
||||
|
||||
// ── Quantization integration tests ────────────────────────────
|
||||
|
||||
#[test]
fn apply_quantization_enables_quantized_forward() {
    // After INT8 symmetric quantization, the forward pass must track the
    // dense baseline within quantization precision.
    let layer_weights = vec![
        vec![1.0, 2.0, 3.0, 4.0],
        vec![-1.0, -2.0, -3.0, -4.0],
        vec![0.5, 1.5, 2.5, 3.5],
    ];
    let layer_bias = vec![0.1, 0.2, 0.3];
    let mut model = SparseModel::new(SparseConfig {
        quant_mode: QuantMode::Int8Symmetric,
        ..Default::default()
    });
    model.add_layer("fc1", layer_weights.clone(), layer_bias.clone());

    // Dense baseline, captured before quantization is applied.
    let input = vec![1.0, 0.5, -1.0, 0.0];
    let dense_out = model.forward(&input);

    // Quantize, then run the same input through the quantized path.
    model.apply_quantization();
    let quant_out = model.forward(&input);

    // Each output stays within 5% relative error (absolute near zero).
    for (d, q) in dense_out.iter().zip(quant_out.iter()) {
        let rel_err = if d.abs() > 0.01 { (d - q).abs() / d.abs() } else { (d - q).abs() };
        assert!(rel_err < 0.05, "quantized error too large: dense={d}, quant={q}, err={rel_err}");
    }
}
|
||||
|
||||
#[test]
fn quantized_forward_accuracy_within_5_percent() {
    // Two-layer model: quantized inference must stay close (MSE) to the
    // dense result across both layers.
    let mut model = SparseModel::new(SparseConfig {
        quant_mode: QuantMode::Int8Symmetric,
        ..Default::default()
    });
    let fc1_weights: Vec<Vec<f32>> = (0..8).map(|r| {
        (0..8).map(|c| ((r * 8 + c) as f32 * 0.17).sin() * 2.0).collect()
    }).collect();
    let fc2_weights: Vec<Vec<f32>> = (0..4).map(|r| {
        (0..8).map(|c| ((r * 8 + c) as f32 * 0.23).cos() * 1.5).collect()
    }).collect();
    model.add_layer("fc1", fc1_weights, vec![0.0f32; 8]);
    model.add_layer("fc2", fc2_weights, vec![0.0f32; 4]);

    let input = vec![1.0, -0.5, 0.3, 0.7, -0.2, 0.9, -0.4, 0.6];
    let dense_out = model.forward(&input);

    model.apply_quantization();
    let quant_out = model.forward(&input);

    // Mean squared error between dense and quantized outputs must be small.
    let mse: f32 = dense_out.iter().zip(quant_out.iter())
        .map(|(d, q)| (d - q).powi(2)).sum::<f32>() / dense_out.len() as f32;
    assert!(mse < 0.5, "quantization MSE too large: {mse}");
}
|
||||
}
|
||||
|
||||
@@ -777,4 +777,98 @@ mod tests {
|
||||
let _ = std::fs::remove_file(&path);
|
||||
let _ = std::fs::remove_dir(&dir);
|
||||
}
|
||||
|
||||
// ── Integration tests: transformer + trainer pipeline ──────────
|
||||
|
||||
#[test]
fn dataset_to_trainer_conversion() {
    // Build a dataset sample with keypoint k at (k, 2k), then verify the
    // trainer-format conversion preserves shapes and coordinates.
    let mut keypoints = [(0.0f32, 0.0f32, 1.0f32); 17];
    for (i, kp) in keypoints.iter_mut().enumerate() {
        kp.0 = i as f32;
        kp.1 = i as f32 * 2.0;
    }
    let ds = crate::dataset::TrainingSample {
        csi_window: vec![vec![1.0; 8]; 4],
        pose_label: crate::dataset::PoseLabel {
            keypoints,
            body_parts: Vec::new(),
            confidence: 1.0,
        },
        source: "test",
    };

    let ts = from_dataset_sample(&ds);
    assert_eq!(ts.csi_features.len(), 4);
    assert_eq!(ts.csi_features[0].len(), 8);
    assert_eq!(ts.target_keypoints.len(), 17);
    assert!((ts.target_keypoints[0].0 - 0.0).abs() < 1e-6);
    assert!((ts.target_keypoints[1].0 - 1.0).abs() < 1e-6);
    assert!(ts.target_body_parts.is_empty()); // no body parts in source
}
|
||||
|
||||
#[test]
fn trainer_with_transformer_runs_epoch() {
    use crate::graph_transformer::{CsiToPoseTransformer, TransformerConfig};
    // A tiny transformer (8 subcarriers, d_model 8) keeps one epoch cheap.
    let transformer = CsiToPoseTransformer::new(TransformerConfig {
        n_subcarriers: 8, n_keypoints: 17, d_model: 8, n_heads: 2, n_gnn_layers: 1,
    });
    let trainer_config = TrainerConfig {
        epochs: 2, batch_size: 4, lr: 0.001,
        warmup_epochs: 0, early_stop_patience: 100,
        ..Default::default()
    };
    let mut t = Trainer::with_transformer(trainer_config, transformer);

    // The params should be the transformer's flattened weights.
    assert!(t.params().len() > 100, "transformer should have many params");

    // Samples must match the transformer's n_subcarriers=8.
    let samples: Vec<TrainingSample> = (0..8).map(|i| TrainingSample {
        csi_features: vec![vec![(i as f32 * 0.1).sin(); 8]; 4],
        target_keypoints: (0..17).map(|k| (k as f32 * 0.5, k as f32 * 0.3, 1.0)).collect(),
        target_body_parts: vec![0, 1, 2],
        target_uv: (vec![0.5; 3], vec![0.5; 3]),
    }).collect();

    let stats = t.train_epoch(&samples);
    assert!(stats.train_loss.is_finite(), "loss should be finite");
}
|
||||
|
||||
#[test]
fn trainer_with_transformer_loss_finite_after_training() {
    use crate::graph_transformer::{CsiToPoseTransformer, TransformerConfig};
    // Run a short full training loop on a tiny model and check numerical
    // stability throughout, then verify the synced-back weights still
    // produce finite poses.
    let transformer = CsiToPoseTransformer::new(TransformerConfig {
        n_subcarriers: 8, n_keypoints: 17, d_model: 8, n_heads: 2, n_gnn_layers: 1,
    });
    let trainer_config = TrainerConfig {
        epochs: 3, batch_size: 4, lr: 0.0001,
        warmup_epochs: 0, early_stop_patience: 100,
        ..Default::default()
    };
    let mut t = Trainer::with_transformer(trainer_config, transformer);

    let samples: Vec<TrainingSample> = (0..4).map(|i| TrainingSample {
        csi_features: vec![vec![(i as f32 * 0.2).sin(); 8]; 4],
        target_keypoints: (0..17).map(|k| (k as f32 * 0.5, k as f32 * 0.3, 1.0)).collect(),
        target_body_parts: vec![],
        target_uv: (vec![], vec![]),
    }).collect();

    let result = t.run_training(&samples, &[]);
    assert!(result.history.iter().all(|s| s.train_loss.is_finite()),
        "all losses should be finite");

    // Sync weights back and verify transformer still works.
    t.sync_transformer_weights();
    if let Some(tf) = t.transformer() {
        let out = tf.forward(&vec![vec![1.0; 8]; 4]);
        assert_eq!(out.keypoints.len(), 17);
        for (i, &(x, y, z)) in out.keypoints.iter().enumerate() {
            assert!(x.is_finite() && y.is_finite() && z.is_finite(),
                "kp {i} not finite after training");
        }
    }
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user