fix: Clean up Rust warnings and add Python vital signs detection

Rust changes:
- Fix unused variable warnings in wifi-densepose-nn (densepose.rs, inference.rs, tensor.rs, translator.rs)
- Remove unused imports in wifi-densepose-mat (breathing.rs, pipeline.rs, csi_receiver.rs, debris_model.rs, vital_signs_classifier.rs)
- All tests continue to pass

Python changes:
- Add vital_signs.py module with BreathingDetector and HeartbeatDetector classes
- Mirror Rust wifi-densepose-mat detection functionality
- Update v1 package version to 1.2.0
- Export new vital signs classes from core module
This commit is contained in:
Claude
2026-01-14 17:42:37 +00:00
parent 16c50abca3
commit 7c00482314
13 changed files with 583 additions and 23 deletions

View File

@@ -252,7 +252,7 @@ impl DensePoseHead {
})?;
let input_arr = input.as_array4()?;
- let (batch, _channels, height, width) = input_arr.dim();
+ let (_batch, _channels, _height, _width) = input_arr.dim();
// Apply shared convolutions
let mut current = input_arr.clone();

View File

@@ -206,7 +206,7 @@ impl Backend for MockBackend {
self.output_shapes.get(name).cloned()
}
- fn run(&self, inputs: HashMap<String, Tensor>) -> NnResult<HashMap<String, Tensor>> {
+ fn run(&self, _inputs: HashMap<String, Tensor>) -> NnResult<HashMap<String, Tensor>> {
let mut outputs = HashMap::new();
for (name, shape) in &self.output_shapes {

View File

@@ -266,7 +266,7 @@ impl Tensor {
}
/// Apply softmax along axis
- pub fn softmax(&self, axis: usize) -> NnResult<Tensor> {
+ pub fn softmax(&self, _axis: usize) -> NnResult<Tensor> {
match self {
Tensor::Float4D(a) => {
let max = a.fold(f32::NEG_INFINITY, |acc, &x| acc.max(x));

View File

@@ -342,7 +342,7 @@ impl ModalityTranslator {
})?;
let input_arr = input.as_array4()?;
- let (batch, _channels, height, width) = input_arr.dim();
+ let (_batch, _channels, _height, _width) = input_arr.dim();
// Encode
let mut encoder_outputs = Vec::new();
@@ -461,7 +461,7 @@ impl ModalityTranslator {
weights: &ConvBlockWeights,
) -> NnResult<Array4<f32>> {
let (batch, in_channels, in_height, in_width) = input.dim();
- let (out_channels, _, kernel_h, kernel_w) = weights.conv_weight.dim();
+ let (out_channels, _, _kernel_h, _kernel_w) = weights.conv_weight.dim();
// Upsample 2x
let out_height = in_height * 2;
@@ -536,7 +536,7 @@ impl ModalityTranslator {
fn apply_attention(
&self,
input: &Array4<f32>,
- weights: &AttentionWeights,
+ _weights: &AttentionWeights,
) -> NnResult<(Array4<f32>, Array4<f32>)> {
let (batch, channels, height, width) = input.dim();
let seq_len = height * width;