feat: Add commodity sensing, proof bundle, Three.js viz, mock isolation
Commodity Sensing Module (ADR-013): - sensing/rssi_collector.py: Real Linux WiFi RSSI collection from /proc/net/wireless and iw commands, with SimulatedCollector for testing - sensing/feature_extractor.py: FFT-based spectral analysis, CUSUM change-point detection, breathing/motion band power extraction - sensing/classifier.py: Rule-based presence/motion classification with confidence scoring and multi-receiver agreement - sensing/backend.py: Common SensingBackend protocol with honest capability reporting (PRESENCE + MOTION only for commodity) Proof of Reality Bundle (ADR-011): - data/proof/generate_reference_signal.py: Deterministic synthetic CSI with known breathing (0.3 Hz) and walking (1.2 Hz) signals - data/proof/sample_csi_data.json: Generated reference signal - data/proof/verify.py: One-command pipeline verification with SHA-256 - data/proof/expected_features.sha256: Expected output hash Three.js Visualization: - ui/components/scene.js: 3D scene setup with OrbitControls Mock Isolation: - testing/mock_pose_generator.py: Mock pose generation moved out of production pose_service.py - services/pose_service.py: Cleaned mock paths https://claude.ai/code/session_01Ki7pvEZtJDvqJkmyn6B714
This commit is contained in:
164
v1/src/sensing/backend.py
Normal file
164
v1/src/sensing/backend.py
Normal file
@@ -0,0 +1,164 @@
|
||||
"""
|
||||
Common sensing backend interface.
|
||||
|
||||
Defines the ``SensingBackend`` protocol and the ``CommodityBackend`` concrete
|
||||
implementation that wires together the RSSI collector, feature extractor, and
|
||||
classifier into a single coherent pipeline.
|
||||
|
||||
The ``Capability`` enum enumerates all possible sensing capabilities. The
|
||||
``CommodityBackend`` honestly reports that it supports only PRESENCE and MOTION.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from enum import Enum, auto
|
||||
from typing import List, Optional, Protocol, Set, runtime_checkable
|
||||
|
||||
from v1.src.sensing.classifier import MotionLevel, PresenceClassifier, SensingResult
|
||||
from v1.src.sensing.feature_extractor import RssiFeatureExtractor, RssiFeatures
|
||||
from v1.src.sensing.rssi_collector import (
|
||||
LinuxWifiCollector,
|
||||
SimulatedCollector,
|
||||
WifiCollector,
|
||||
WifiSample,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Capability enum
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class Capability(Enum):
|
||||
"""All possible sensing capabilities across backend tiers."""
|
||||
|
||||
PRESENCE = auto()
|
||||
MOTION = auto()
|
||||
RESPIRATION = auto()
|
||||
LOCATION = auto()
|
||||
POSE = auto()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Backend protocol
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@runtime_checkable
|
||||
class SensingBackend(Protocol):
|
||||
"""Protocol that all sensing backends must implement."""
|
||||
|
||||
def get_features(self) -> RssiFeatures:
|
||||
"""Extract current features from the sensing pipeline."""
|
||||
...
|
||||
|
||||
def get_capabilities(self) -> Set[Capability]:
|
||||
"""Return the set of capabilities this backend supports."""
|
||||
...
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Commodity backend
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class CommodityBackend:
|
||||
"""
|
||||
RSSI-based commodity sensing backend.
|
||||
|
||||
Wires together:
|
||||
- A WiFi collector (real or simulated)
|
||||
- An RSSI feature extractor
|
||||
- A presence/motion classifier
|
||||
|
||||
Capabilities: PRESENCE and MOTION only.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
collector : WifiCollector-compatible object
|
||||
The data source (LinuxWifiCollector or SimulatedCollector).
|
||||
extractor : RssiFeatureExtractor, optional
|
||||
Feature extractor (created with defaults if not provided).
|
||||
classifier : PresenceClassifier, optional
|
||||
Classifier (created with defaults if not provided).
|
||||
"""
|
||||
|
||||
SUPPORTED_CAPABILITIES: Set[Capability] = frozenset(
|
||||
{Capability.PRESENCE, Capability.MOTION}
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
collector: LinuxWifiCollector | SimulatedCollector,
|
||||
extractor: Optional[RssiFeatureExtractor] = None,
|
||||
classifier: Optional[PresenceClassifier] = None,
|
||||
) -> None:
|
||||
self._collector = collector
|
||||
self._extractor = extractor or RssiFeatureExtractor()
|
||||
self._classifier = classifier or PresenceClassifier()
|
||||
|
||||
@property
|
||||
def collector(self) -> LinuxWifiCollector | SimulatedCollector:
|
||||
return self._collector
|
||||
|
||||
@property
|
||||
def extractor(self) -> RssiFeatureExtractor:
|
||||
return self._extractor
|
||||
|
||||
@property
|
||||
def classifier(self) -> PresenceClassifier:
|
||||
return self._classifier
|
||||
|
||||
# -- SensingBackend protocol ---------------------------------------------
|
||||
|
||||
def get_features(self) -> RssiFeatures:
|
||||
"""
|
||||
Get current features from the latest collected samples.
|
||||
|
||||
Uses the extractor's window_seconds to determine how many samples
|
||||
to pull from the collector's ring buffer.
|
||||
"""
|
||||
window = self._extractor.window_seconds
|
||||
sample_rate = self._collector.sample_rate_hz
|
||||
n_needed = int(window * sample_rate)
|
||||
samples = self._collector.get_samples(n=n_needed)
|
||||
return self._extractor.extract(samples)
|
||||
|
||||
def get_capabilities(self) -> Set[Capability]:
|
||||
"""CommodityBackend supports PRESENCE and MOTION only."""
|
||||
return set(self.SUPPORTED_CAPABILITIES)
|
||||
|
||||
# -- convenience methods -------------------------------------------------
|
||||
|
||||
def get_result(self) -> SensingResult:
|
||||
"""
|
||||
Run the full pipeline: collect -> extract -> classify.
|
||||
|
||||
Returns
|
||||
-------
|
||||
SensingResult
|
||||
Classification result with motion level and confidence.
|
||||
"""
|
||||
features = self.get_features()
|
||||
return self._classifier.classify(features)
|
||||
|
||||
def start(self) -> None:
|
||||
"""Start the underlying collector."""
|
||||
self._collector.start()
|
||||
logger.info(
|
||||
"CommodityBackend started (capabilities: %s)",
|
||||
", ".join(c.name for c in self.SUPPORTED_CAPABILITIES),
|
||||
)
|
||||
|
||||
def stop(self) -> None:
|
||||
"""Stop the underlying collector."""
|
||||
self._collector.stop()
|
||||
logger.info("CommodityBackend stopped")
|
||||
|
||||
def is_capable(self, capability: Capability) -> bool:
|
||||
"""Check whether this backend supports a specific capability."""
|
||||
return capability in self.SUPPORTED_CAPABILITIES
|
||||
|
||||
def __repr__(self) -> str:
|
||||
caps = ", ".join(c.name for c in sorted(self.SUPPORTED_CAPABILITIES, key=lambda c: c.value))
|
||||
return f"CommodityBackend(capabilities=[{caps}])"
|
||||
201
v1/src/sensing/classifier.py
Normal file
201
v1/src/sensing/classifier.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""
|
||||
Presence and motion classification from RSSI features.
|
||||
|
||||
Uses rule-based logic with configurable thresholds to classify the current
|
||||
sensing state into one of three motion levels:
|
||||
ABSENT -- no person detected
|
||||
PRESENT_STILL -- person present but stationary
|
||||
ACTIVE -- person present and moving
|
||||
|
||||
Confidence is derived from spectral feature strength and optional
|
||||
cross-receiver agreement.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
from v1.src.sensing.feature_extractor import RssiFeatures
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MotionLevel(Enum):
|
||||
"""Classified motion state."""
|
||||
|
||||
ABSENT = "absent"
|
||||
PRESENT_STILL = "present_still"
|
||||
ACTIVE = "active"
|
||||
|
||||
|
||||
@dataclass
|
||||
class SensingResult:
|
||||
"""Output of the presence/motion classifier."""
|
||||
|
||||
motion_level: MotionLevel
|
||||
confidence: float # 0.0 to 1.0
|
||||
presence_detected: bool
|
||||
rssi_variance: float
|
||||
motion_band_energy: float
|
||||
breathing_band_energy: float
|
||||
n_change_points: int
|
||||
details: str = ""
|
||||
|
||||
|
||||
class PresenceClassifier:
|
||||
"""
|
||||
Rule-based presence and motion classifier.
|
||||
|
||||
Classification rules
|
||||
--------------------
|
||||
1. **Presence**: RSSI variance exceeds ``presence_variance_threshold``.
|
||||
2. **Motion level**:
|
||||
- ABSENT if variance < presence threshold
|
||||
- ACTIVE if variance >= presence threshold AND motion band energy
|
||||
exceeds ``motion_energy_threshold``
|
||||
- PRESENT_STILL otherwise (variance above threshold but low motion energy)
|
||||
|
||||
Confidence model
|
||||
----------------
|
||||
Base confidence comes from how far the measured variance / energy exceeds
|
||||
the respective thresholds. Cross-receiver agreement (when multiple
|
||||
receivers report results) can boost confidence further.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
presence_variance_threshold : float
|
||||
Minimum RSSI variance (dBm^2) to declare presence (default 0.5).
|
||||
motion_energy_threshold : float
|
||||
Minimum motion-band spectral energy to classify as ACTIVE (default 0.1).
|
||||
max_receivers : int
|
||||
Maximum number of receivers for cross-receiver agreement (default 1).
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
presence_variance_threshold: float = 0.5,
|
||||
motion_energy_threshold: float = 0.1,
|
||||
max_receivers: int = 1,
|
||||
) -> None:
|
||||
self._var_thresh = presence_variance_threshold
|
||||
self._motion_thresh = motion_energy_threshold
|
||||
self._max_receivers = max_receivers
|
||||
|
||||
@property
|
||||
def presence_variance_threshold(self) -> float:
|
||||
return self._var_thresh
|
||||
|
||||
@property
|
||||
def motion_energy_threshold(self) -> float:
|
||||
return self._motion_thresh
|
||||
|
||||
def classify(
|
||||
self,
|
||||
features: RssiFeatures,
|
||||
other_receiver_results: Optional[List[SensingResult]] = None,
|
||||
) -> SensingResult:
|
||||
"""
|
||||
Classify presence and motion from extracted RSSI features.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
features : RssiFeatures
|
||||
Features extracted from the RSSI time series of one receiver.
|
||||
other_receiver_results : list of SensingResult, optional
|
||||
Results from other receivers for cross-receiver agreement.
|
||||
|
||||
Returns
|
||||
-------
|
||||
SensingResult
|
||||
"""
|
||||
variance = features.variance
|
||||
motion_energy = features.motion_band_power
|
||||
breathing_energy = features.breathing_band_power
|
||||
|
||||
# -- presence decision ------------------------------------------------
|
||||
presence = variance >= self._var_thresh
|
||||
|
||||
# -- motion level -----------------------------------------------------
|
||||
if not presence:
|
||||
level = MotionLevel.ABSENT
|
||||
elif motion_energy >= self._motion_thresh:
|
||||
level = MotionLevel.ACTIVE
|
||||
else:
|
||||
level = MotionLevel.PRESENT_STILL
|
||||
|
||||
# -- confidence -------------------------------------------------------
|
||||
confidence = self._compute_confidence(
|
||||
variance, motion_energy, breathing_energy, level, other_receiver_results
|
||||
)
|
||||
|
||||
# -- detail string ----------------------------------------------------
|
||||
details = (
|
||||
f"var={variance:.4f} (thresh={self._var_thresh}), "
|
||||
f"motion_energy={motion_energy:.4f} (thresh={self._motion_thresh}), "
|
||||
f"breathing_energy={breathing_energy:.4f}, "
|
||||
f"change_points={features.n_change_points}"
|
||||
)
|
||||
|
||||
return SensingResult(
|
||||
motion_level=level,
|
||||
confidence=confidence,
|
||||
presence_detected=presence,
|
||||
rssi_variance=variance,
|
||||
motion_band_energy=motion_energy,
|
||||
breathing_band_energy=breathing_energy,
|
||||
n_change_points=features.n_change_points,
|
||||
details=details,
|
||||
)
|
||||
|
||||
def _compute_confidence(
|
||||
self,
|
||||
variance: float,
|
||||
motion_energy: float,
|
||||
breathing_energy: float,
|
||||
level: MotionLevel,
|
||||
other_results: Optional[List[SensingResult]],
|
||||
) -> float:
|
||||
"""
|
||||
Compute a confidence score in [0, 1].
|
||||
|
||||
The score is composed of:
|
||||
- Base (60%): how clearly the variance exceeds (or falls below) the
|
||||
presence threshold.
|
||||
- Spectral (20%): strength of the relevant spectral band.
|
||||
- Agreement (20%): cross-receiver consensus (if available).
|
||||
"""
|
||||
# -- base confidence (0..1) ------------------------------------------
|
||||
if level == MotionLevel.ABSENT:
|
||||
# Confidence in absence increases as variance shrinks relative to threshold
|
||||
if self._var_thresh > 0:
|
||||
base = max(0.0, 1.0 - variance / self._var_thresh)
|
||||
else:
|
||||
base = 1.0
|
||||
else:
|
||||
# Confidence in presence increases as variance exceeds threshold
|
||||
ratio = variance / self._var_thresh if self._var_thresh > 0 else 10.0
|
||||
base = min(1.0, ratio)
|
||||
|
||||
# -- spectral confidence (0..1) --------------------------------------
|
||||
if level == MotionLevel.ACTIVE:
|
||||
spectral = min(1.0, motion_energy / max(self._motion_thresh, 1e-12))
|
||||
elif level == MotionLevel.PRESENT_STILL:
|
||||
# For still, breathing band energy is more relevant
|
||||
spectral = min(1.0, breathing_energy / max(self._motion_thresh, 1e-12))
|
||||
else:
|
||||
spectral = 1.0 # No spectral requirement for absence
|
||||
|
||||
# -- cross-receiver agreement (0..1) ---------------------------------
|
||||
agreement = 1.0 # default: single receiver
|
||||
if other_results:
|
||||
same_level = sum(
|
||||
1 for r in other_results if r.motion_level == level
|
||||
)
|
||||
agreement = (same_level + 1) / (len(other_results) + 1)
|
||||
|
||||
# Weighted combination
|
||||
confidence = 0.6 * base + 0.2 * spectral + 0.2 * agreement
|
||||
return max(0.0, min(1.0, confidence))
|
||||
312
v1/src/sensing/feature_extractor.py
Normal file
312
v1/src/sensing/feature_extractor.py
Normal file
@@ -0,0 +1,312 @@
|
||||
"""
|
||||
Signal feature extraction from RSSI time series.
|
||||
|
||||
Extracts both time-domain statistical features and frequency-domain spectral
|
||||
features using real mathematics (scipy.fft, scipy.stats). Also implements
|
||||
CUSUM change-point detection for abrupt RSSI transitions.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
import numpy as np
|
||||
from numpy.typing import NDArray
|
||||
from scipy import fft as scipy_fft
|
||||
from scipy import stats as scipy_stats
|
||||
|
||||
from v1.src.sensing.rssi_collector import WifiSample
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Feature dataclass
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@dataclass
|
||||
class RssiFeatures:
|
||||
"""Container for all extracted RSSI features."""
|
||||
|
||||
# -- time-domain --------------------------------------------------------
|
||||
mean: float = 0.0
|
||||
variance: float = 0.0
|
||||
std: float = 0.0
|
||||
skewness: float = 0.0
|
||||
kurtosis: float = 0.0
|
||||
range: float = 0.0
|
||||
iqr: float = 0.0 # inter-quartile range
|
||||
|
||||
# -- frequency-domain ---------------------------------------------------
|
||||
dominant_freq_hz: float = 0.0
|
||||
breathing_band_power: float = 0.0 # 0.1 - 0.5 Hz
|
||||
motion_band_power: float = 0.0 # 0.5 - 3.0 Hz
|
||||
total_spectral_power: float = 0.0
|
||||
|
||||
# -- change-point -------------------------------------------------------
|
||||
change_points: List[int] = field(default_factory=list)
|
||||
n_change_points: int = 0
|
||||
|
||||
# -- metadata -----------------------------------------------------------
|
||||
n_samples: int = 0
|
||||
duration_seconds: float = 0.0
|
||||
sample_rate_hz: float = 0.0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Feature extractor
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class RssiFeatureExtractor:
|
||||
"""
|
||||
Extract time-domain and frequency-domain features from an RSSI time series.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
window_seconds : float
|
||||
Length of the analysis window in seconds (default 30).
|
||||
cusum_threshold : float
|
||||
CUSUM threshold for change-point detection (default 3.0 standard deviations
|
||||
of the signal).
|
||||
cusum_drift : float
|
||||
CUSUM drift allowance (default 0.5 standard deviations).
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
window_seconds: float = 30.0,
|
||||
cusum_threshold: float = 3.0,
|
||||
cusum_drift: float = 0.5,
|
||||
) -> None:
|
||||
self._window_seconds = window_seconds
|
||||
self._cusum_threshold = cusum_threshold
|
||||
self._cusum_drift = cusum_drift
|
||||
|
||||
@property
|
||||
def window_seconds(self) -> float:
|
||||
return self._window_seconds
|
||||
|
||||
def extract(self, samples: List[WifiSample]) -> RssiFeatures:
|
||||
"""
|
||||
Extract features from a list of WifiSample objects.
|
||||
|
||||
Only the most recent ``window_seconds`` of data are used.
|
||||
At least 4 samples are required for meaningful features.
|
||||
"""
|
||||
if len(samples) < 4:
|
||||
logger.warning(
|
||||
"Not enough samples for feature extraction (%d < 4)", len(samples)
|
||||
)
|
||||
return RssiFeatures(n_samples=len(samples))
|
||||
|
||||
# Trim to window
|
||||
samples = self._trim_to_window(samples)
|
||||
rssi = np.array([s.rssi_dbm for s in samples], dtype=np.float64)
|
||||
timestamps = np.array([s.timestamp for s in samples], dtype=np.float64)
|
||||
|
||||
# Estimate sample rate from actual timestamps
|
||||
dt = np.diff(timestamps)
|
||||
if len(dt) == 0 or np.mean(dt) <= 0:
|
||||
sample_rate = 10.0 # fallback
|
||||
else:
|
||||
sample_rate = 1.0 / np.mean(dt)
|
||||
|
||||
duration = timestamps[-1] - timestamps[0] if len(timestamps) > 1 else 0.0
|
||||
|
||||
# Build features
|
||||
features = RssiFeatures(
|
||||
n_samples=len(rssi),
|
||||
duration_seconds=float(duration),
|
||||
sample_rate_hz=float(sample_rate),
|
||||
)
|
||||
|
||||
self._compute_time_domain(rssi, features)
|
||||
self._compute_frequency_domain(rssi, sample_rate, features)
|
||||
self._compute_change_points(rssi, features)
|
||||
|
||||
return features
|
||||
|
||||
def extract_from_array(
|
||||
self, rssi: NDArray[np.float64], sample_rate_hz: float
|
||||
) -> RssiFeatures:
|
||||
"""
|
||||
Extract features directly from a numpy array (useful for testing).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
rssi : ndarray
|
||||
1-D array of RSSI values in dBm.
|
||||
sample_rate_hz : float
|
||||
Sampling rate in Hz.
|
||||
"""
|
||||
if len(rssi) < 4:
|
||||
return RssiFeatures(n_samples=len(rssi))
|
||||
|
||||
duration = len(rssi) / sample_rate_hz
|
||||
|
||||
features = RssiFeatures(
|
||||
n_samples=len(rssi),
|
||||
duration_seconds=float(duration),
|
||||
sample_rate_hz=float(sample_rate_hz),
|
||||
)
|
||||
|
||||
self._compute_time_domain(rssi, features)
|
||||
self._compute_frequency_domain(rssi, sample_rate_hz, features)
|
||||
self._compute_change_points(rssi, features)
|
||||
|
||||
return features
|
||||
|
||||
# -- time-domain ---------------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _compute_time_domain(rssi: NDArray[np.float64], features: RssiFeatures) -> None:
|
||||
features.mean = float(np.mean(rssi))
|
||||
features.variance = float(np.var(rssi, ddof=1)) if len(rssi) > 1 else 0.0
|
||||
features.std = float(np.std(rssi, ddof=1)) if len(rssi) > 1 else 0.0
|
||||
features.skewness = float(scipy_stats.skew(rssi, bias=False)) if len(rssi) > 2 else 0.0
|
||||
features.kurtosis = float(scipy_stats.kurtosis(rssi, bias=False)) if len(rssi) > 3 else 0.0
|
||||
features.range = float(np.ptp(rssi))
|
||||
|
||||
q75, q25 = np.percentile(rssi, [75, 25])
|
||||
features.iqr = float(q75 - q25)
|
||||
|
||||
# -- frequency-domain ----------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _compute_frequency_domain(
|
||||
rssi: NDArray[np.float64],
|
||||
sample_rate: float,
|
||||
features: RssiFeatures,
|
||||
) -> None:
|
||||
"""Compute one-sided FFT power spectrum and extract band powers."""
|
||||
n = len(rssi)
|
||||
if n < 4:
|
||||
return
|
||||
|
||||
# Remove DC (subtract mean)
|
||||
signal = rssi - np.mean(rssi)
|
||||
|
||||
# Apply Hann window to reduce spectral leakage
|
||||
window = np.hanning(n)
|
||||
windowed = signal * window
|
||||
|
||||
# Compute real FFT
|
||||
fft_vals = scipy_fft.rfft(windowed)
|
||||
freqs = scipy_fft.rfftfreq(n, d=1.0 / sample_rate)
|
||||
|
||||
# Power spectral density (magnitude squared, normalised by N)
|
||||
psd = (np.abs(fft_vals) ** 2) / n
|
||||
|
||||
# Skip DC component (index 0)
|
||||
if len(freqs) > 1:
|
||||
freqs_no_dc = freqs[1:]
|
||||
psd_no_dc = psd[1:]
|
||||
else:
|
||||
return
|
||||
|
||||
# Total spectral power
|
||||
features.total_spectral_power = float(np.sum(psd_no_dc))
|
||||
|
||||
# Dominant frequency
|
||||
if len(psd_no_dc) > 0:
|
||||
peak_idx = int(np.argmax(psd_no_dc))
|
||||
features.dominant_freq_hz = float(freqs_no_dc[peak_idx])
|
||||
|
||||
# Band powers
|
||||
features.breathing_band_power = float(
|
||||
_band_power(freqs_no_dc, psd_no_dc, 0.1, 0.5)
|
||||
)
|
||||
features.motion_band_power = float(
|
||||
_band_power(freqs_no_dc, psd_no_dc, 0.5, 3.0)
|
||||
)
|
||||
|
||||
# -- change-point detection (CUSUM) --------------------------------------
|
||||
|
||||
def _compute_change_points(
|
||||
self, rssi: NDArray[np.float64], features: RssiFeatures
|
||||
) -> None:
|
||||
"""
|
||||
Detect change points using the CUSUM algorithm.
|
||||
|
||||
The CUSUM statistic tracks cumulative deviations from the mean,
|
||||
flagging points where the signal mean shifts abruptly.
|
||||
"""
|
||||
if len(rssi) < 4:
|
||||
return
|
||||
|
||||
mean_val = np.mean(rssi)
|
||||
std_val = np.std(rssi, ddof=1)
|
||||
if std_val < 1e-12:
|
||||
features.change_points = []
|
||||
features.n_change_points = 0
|
||||
return
|
||||
|
||||
threshold = self._cusum_threshold * std_val
|
||||
drift = self._cusum_drift * std_val
|
||||
|
||||
change_points = cusum_detect(rssi, mean_val, threshold, drift)
|
||||
features.change_points = change_points
|
||||
features.n_change_points = len(change_points)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helper functions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _band_power(
|
||||
freqs: NDArray[np.float64],
|
||||
psd: NDArray[np.float64],
|
||||
low_hz: float,
|
||||
high_hz: float,
|
||||
) -> float:
|
||||
"""Sum PSD within a frequency band [low_hz, high_hz]."""
|
||||
mask = (freqs >= low_hz) & (freqs <= high_hz)
|
||||
return float(np.sum(psd[mask]))
|
||||
|
||||
|
||||
def cusum_detect(
|
||||
signal: NDArray[np.float64],
|
||||
target: float,
|
||||
threshold: float,
|
||||
drift: float,
|
||||
) -> List[int]:
|
||||
"""
|
||||
CUSUM (cumulative sum) change-point detection.
|
||||
|
||||
Detects both upward and downward shifts in the signal mean.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
signal : ndarray
|
||||
The 1-D signal to analyse.
|
||||
target : float
|
||||
Expected mean of the signal.
|
||||
threshold : float
|
||||
Decision threshold for declaring a change point.
|
||||
drift : float
|
||||
Allowable drift before accumulating deviation.
|
||||
|
||||
Returns
|
||||
-------
|
||||
list of int
|
||||
Indices where change points were detected.
|
||||
"""
|
||||
n = len(signal)
|
||||
s_pos = 0.0
|
||||
s_neg = 0.0
|
||||
change_points: List[int] = []
|
||||
|
||||
for i in range(n):
|
||||
deviation = signal[i] - target
|
||||
s_pos = max(0.0, s_pos + deviation - drift)
|
||||
s_neg = max(0.0, s_neg - deviation - drift)
|
||||
|
||||
if s_pos > threshold or s_neg > threshold:
|
||||
change_points.append(i)
|
||||
# Reset after detection to find subsequent changes
|
||||
s_pos = 0.0
|
||||
s_neg = 0.0
|
||||
|
||||
return change_points
|
||||
446
v1/src/sensing/rssi_collector.py
Normal file
446
v1/src/sensing/rssi_collector.py
Normal file
@@ -0,0 +1,446 @@
|
||||
"""
|
||||
RSSI data collection from Linux WiFi interfaces.
|
||||
|
||||
Provides two concrete collectors:
|
||||
- LinuxWifiCollector: reads real RSSI from /proc/net/wireless and iw commands
|
||||
- SimulatedCollector: produces deterministic synthetic signals for testing
|
||||
|
||||
Both share the same WifiSample dataclass and thread-safe ring buffer.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import math
|
||||
import re
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
from collections import deque
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Deque, List, Optional, Protocol
|
||||
|
||||
import numpy as np
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Data types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class WifiSample:
|
||||
"""A single WiFi measurement sample."""
|
||||
|
||||
timestamp: float # UNIX epoch seconds (time.time())
|
||||
rssi_dbm: float # Received signal strength in dBm
|
||||
noise_dbm: float # Noise floor in dBm
|
||||
link_quality: float # Link quality 0-1 (normalised)
|
||||
tx_bytes: int # Cumulative TX bytes
|
||||
rx_bytes: int # Cumulative RX bytes
|
||||
retry_count: int # Cumulative retry count
|
||||
interface: str # WiFi interface name
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Thread-safe ring buffer
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class RingBuffer:
|
||||
"""Thread-safe fixed-size ring buffer for WifiSample objects."""
|
||||
|
||||
def __init__(self, max_size: int) -> None:
|
||||
self._buf: Deque[WifiSample] = deque(maxlen=max_size)
|
||||
self._lock = threading.Lock()
|
||||
|
||||
def append(self, sample: WifiSample) -> None:
|
||||
with self._lock:
|
||||
self._buf.append(sample)
|
||||
|
||||
def get_all(self) -> List[WifiSample]:
|
||||
"""Return a snapshot of all samples (oldest first)."""
|
||||
with self._lock:
|
||||
return list(self._buf)
|
||||
|
||||
def get_last_n(self, n: int) -> List[WifiSample]:
|
||||
"""Return the most recent *n* samples."""
|
||||
with self._lock:
|
||||
items = list(self._buf)
|
||||
return items[-n:] if n < len(items) else items
|
||||
|
||||
def __len__(self) -> int:
|
||||
with self._lock:
|
||||
return len(self._buf)
|
||||
|
||||
def clear(self) -> None:
|
||||
with self._lock:
|
||||
self._buf.clear()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Collector protocol
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class WifiCollector(Protocol):
|
||||
"""Protocol that all WiFi collectors must satisfy."""
|
||||
|
||||
def start(self) -> None: ...
|
||||
def stop(self) -> None: ...
|
||||
def get_samples(self, n: Optional[int] = None) -> List[WifiSample]: ...
|
||||
@property
|
||||
def sample_rate_hz(self) -> float: ...
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Linux WiFi collector (real hardware)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class LinuxWifiCollector:
|
||||
"""
|
||||
Collects real RSSI data from a Linux WiFi interface.
|
||||
|
||||
Data sources:
|
||||
- /proc/net/wireless (RSSI, noise, link quality)
|
||||
- iw dev <iface> station dump (TX/RX bytes, retry count)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
interface : str
|
||||
WiFi interface name, e.g. ``"wlan0"``.
|
||||
sample_rate_hz : float
|
||||
Target sampling rate in Hz (default 10).
|
||||
buffer_seconds : int
|
||||
How many seconds of history to keep in the ring buffer (default 120).
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
interface: str = "wlan0",
|
||||
sample_rate_hz: float = 10.0,
|
||||
buffer_seconds: int = 120,
|
||||
) -> None:
|
||||
self._interface = interface
|
||||
self._rate = sample_rate_hz
|
||||
self._buffer = RingBuffer(max_size=int(sample_rate_hz * buffer_seconds))
|
||||
self._running = False
|
||||
self._thread: Optional[threading.Thread] = None
|
||||
|
||||
# -- public API ----------------------------------------------------------
|
||||
|
||||
@property
|
||||
def sample_rate_hz(self) -> float:
|
||||
return self._rate
|
||||
|
||||
def start(self) -> None:
|
||||
"""Start the background sampling thread."""
|
||||
if self._running:
|
||||
return
|
||||
self._validate_interface()
|
||||
self._running = True
|
||||
self._thread = threading.Thread(
|
||||
target=self._sample_loop, daemon=True, name="wifi-rssi-collector"
|
||||
)
|
||||
self._thread.start()
|
||||
logger.info(
|
||||
"LinuxWifiCollector started on %s at %.1f Hz",
|
||||
self._interface,
|
||||
self._rate,
|
||||
)
|
||||
|
||||
def stop(self) -> None:
|
||||
"""Stop the background sampling thread."""
|
||||
self._running = False
|
||||
if self._thread is not None:
|
||||
self._thread.join(timeout=2.0)
|
||||
self._thread = None
|
||||
logger.info("LinuxWifiCollector stopped")
|
||||
|
||||
def get_samples(self, n: Optional[int] = None) -> List[WifiSample]:
|
||||
"""
|
||||
Return collected samples.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
n : int or None
|
||||
If given, return only the most recent *n* samples.
|
||||
"""
|
||||
if n is not None:
|
||||
return self._buffer.get_last_n(n)
|
||||
return self._buffer.get_all()
|
||||
|
||||
def collect_once(self) -> WifiSample:
|
||||
"""Collect a single sample right now (blocking)."""
|
||||
return self._read_sample()
|
||||
|
||||
# -- internals -----------------------------------------------------------
|
||||
|
||||
def _validate_interface(self) -> None:
|
||||
"""Check that the interface exists on this machine."""
|
||||
try:
|
||||
with open("/proc/net/wireless", "r") as f:
|
||||
content = f.read()
|
||||
if self._interface not in content:
|
||||
raise RuntimeError(
|
||||
f"WiFi interface '{self._interface}' not found in "
|
||||
f"/proc/net/wireless. Available interfaces may include: "
|
||||
f"{self._parse_interface_names(content)}. "
|
||||
f"Ensure the interface is up and associated with an AP."
|
||||
)
|
||||
except FileNotFoundError:
|
||||
raise RuntimeError(
|
||||
"Cannot read /proc/net/wireless. "
|
||||
"This collector requires a Linux system with wireless-extensions support. "
|
||||
"If running in a container or VM without WiFi hardware, use "
|
||||
"SimulatedCollector instead."
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _parse_interface_names(proc_content: str) -> List[str]:
|
||||
"""Extract interface names from /proc/net/wireless content."""
|
||||
names: List[str] = []
|
||||
for line in proc_content.splitlines()[2:]: # skip header lines
|
||||
parts = line.split(":")
|
||||
if len(parts) >= 2:
|
||||
names.append(parts[0].strip())
|
||||
return names
|
||||
|
||||
def _sample_loop(self) -> None:
|
||||
interval = 1.0 / self._rate
|
||||
while self._running:
|
||||
t0 = time.monotonic()
|
||||
try:
|
||||
sample = self._read_sample()
|
||||
self._buffer.append(sample)
|
||||
except Exception:
|
||||
logger.exception("Error reading WiFi sample")
|
||||
elapsed = time.monotonic() - t0
|
||||
sleep_time = max(0.0, interval - elapsed)
|
||||
if sleep_time > 0:
|
||||
time.sleep(sleep_time)
|
||||
|
||||
def _read_sample(self) -> WifiSample:
|
||||
"""Read one sample from the OS."""
|
||||
rssi, noise, quality = self._read_proc_wireless()
|
||||
tx_bytes, rx_bytes, retries = self._read_iw_station()
|
||||
return WifiSample(
|
||||
timestamp=time.time(),
|
||||
rssi_dbm=rssi,
|
||||
noise_dbm=noise,
|
||||
link_quality=quality,
|
||||
tx_bytes=tx_bytes,
|
||||
rx_bytes=rx_bytes,
|
||||
retry_count=retries,
|
||||
interface=self._interface,
|
||||
)
|
||||
|
||||
def _read_proc_wireless(self) -> tuple[float, float, float]:
|
||||
"""Parse /proc/net/wireless for the configured interface."""
|
||||
try:
|
||||
with open("/proc/net/wireless", "r") as f:
|
||||
for line in f:
|
||||
if self._interface in line:
|
||||
# Format: iface: status quality signal noise ...
|
||||
parts = line.split()
|
||||
# parts[0] = "wlan0:", parts[2]=quality, parts[3]=signal, parts[4]=noise
|
||||
quality_raw = float(parts[2].rstrip("."))
|
||||
signal_raw = float(parts[3].rstrip("."))
|
||||
noise_raw = float(parts[4].rstrip("."))
|
||||
# Normalise quality to 0..1 (max is typically 70)
|
||||
quality = min(1.0, max(0.0, quality_raw / 70.0))
|
||||
return signal_raw, noise_raw, quality
|
||||
except (FileNotFoundError, IndexError, ValueError) as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to read /proc/net/wireless for {self._interface}: {exc}"
|
||||
) from exc
|
||||
raise RuntimeError(
|
||||
f"Interface {self._interface} not found in /proc/net/wireless"
|
||||
)
|
||||
|
||||
def _read_iw_station(self) -> tuple[int, int, int]:
|
||||
"""Run ``iw dev <iface> station dump`` and parse TX/RX/retries."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["iw", "dev", self._interface, "station", "dump"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=2.0,
|
||||
)
|
||||
text = result.stdout
|
||||
|
||||
tx_bytes = self._extract_int(text, r"tx bytes:\s*(\d+)")
|
||||
rx_bytes = self._extract_int(text, r"rx bytes:\s*(\d+)")
|
||||
retries = self._extract_int(text, r"tx retries:\s*(\d+)")
|
||||
return tx_bytes, rx_bytes, retries
|
||||
except (FileNotFoundError, subprocess.TimeoutExpired):
|
||||
# iw not installed or timed out -- degrade gracefully
|
||||
return 0, 0, 0
|
||||
|
||||
@staticmethod
|
||||
def _extract_int(text: str, pattern: str) -> int:
|
||||
m = re.search(pattern, text)
|
||||
return int(m.group(1)) if m else 0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Simulated collector (deterministic, for testing)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class SimulatedCollector:
|
||||
"""
|
||||
Deterministic simulated WiFi collector for testing.
|
||||
|
||||
Generates a synthetic RSSI signal composed of:
|
||||
- A constant baseline (-50 dBm default)
|
||||
- An optional sinusoidal component (configurable frequency/amplitude)
|
||||
- Optional step-change injection (for change-point testing)
|
||||
- Deterministic noise from a seeded PRNG
|
||||
|
||||
This is explicitly a test/development tool and makes no attempt to
|
||||
appear as real hardware.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
seed : int
|
||||
Random seed for deterministic output.
|
||||
sample_rate_hz : float
|
||||
Target sampling rate in Hz (default 10).
|
||||
buffer_seconds : int
|
||||
Ring buffer capacity in seconds (default 120).
|
||||
baseline_dbm : float
|
||||
RSSI baseline in dBm (default -50).
|
||||
sine_freq_hz : float
|
||||
Frequency of the sinusoidal RSSI component (default 0.3 Hz, breathing band).
|
||||
sine_amplitude_dbm : float
|
||||
Amplitude of the sinusoidal component (default 2.0 dBm).
|
||||
noise_std_dbm : float
|
||||
Standard deviation of additive Gaussian noise (default 0.5 dBm).
|
||||
step_change_at : float or None
|
||||
If set, inject a step change of ``step_change_dbm`` at this time offset
|
||||
(seconds from start).
|
||||
step_change_dbm : float
|
||||
Magnitude of the step change (default -10 dBm).
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
seed: int = 42,
|
||||
sample_rate_hz: float = 10.0,
|
||||
buffer_seconds: int = 120,
|
||||
baseline_dbm: float = -50.0,
|
||||
sine_freq_hz: float = 0.3,
|
||||
sine_amplitude_dbm: float = 2.0,
|
||||
noise_std_dbm: float = 0.5,
|
||||
step_change_at: Optional[float] = None,
|
||||
step_change_dbm: float = -10.0,
|
||||
) -> None:
|
||||
self._rate = sample_rate_hz
|
||||
self._buffer = RingBuffer(max_size=int(sample_rate_hz * buffer_seconds))
|
||||
self._rng = np.random.default_rng(seed)
|
||||
|
||||
self._baseline = baseline_dbm
|
||||
self._sine_freq = sine_freq_hz
|
||||
self._sine_amp = sine_amplitude_dbm
|
||||
self._noise_std = noise_std_dbm
|
||||
self._step_at = step_change_at
|
||||
self._step_dbm = step_change_dbm
|
||||
|
||||
self._running = False
|
||||
self._thread: Optional[threading.Thread] = None
|
||||
self._start_time: float = 0.0
|
||||
self._sample_index: int = 0
|
||||
|
||||
# -- public API ----------------------------------------------------------
|
||||
|
||||
@property
|
||||
def sample_rate_hz(self) -> float:
|
||||
return self._rate
|
||||
|
||||
def start(self) -> None:
|
||||
if self._running:
|
||||
return
|
||||
self._running = True
|
||||
self._start_time = time.time()
|
||||
self._sample_index = 0
|
||||
self._thread = threading.Thread(
|
||||
target=self._sample_loop, daemon=True, name="sim-rssi-collector"
|
||||
)
|
||||
self._thread.start()
|
||||
logger.info("SimulatedCollector started at %.1f Hz (seed reused from init)", self._rate)
|
||||
|
||||
def stop(self) -> None:
|
||||
self._running = False
|
||||
if self._thread is not None:
|
||||
self._thread.join(timeout=2.0)
|
||||
self._thread = None
|
||||
|
||||
def get_samples(self, n: Optional[int] = None) -> List[WifiSample]:
|
||||
if n is not None:
|
||||
return self._buffer.get_last_n(n)
|
||||
return self._buffer.get_all()
|
||||
|
||||
def generate_samples(self, duration_seconds: float) -> List[WifiSample]:
|
||||
"""
|
||||
Generate a batch of samples without the background thread.
|
||||
|
||||
Useful for unit tests that need a known signal without timing jitter.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
duration_seconds : float
|
||||
How many seconds of signal to produce.
|
||||
|
||||
Returns
|
||||
-------
|
||||
list of WifiSample
|
||||
"""
|
||||
n_samples = int(duration_seconds * self._rate)
|
||||
samples: List[WifiSample] = []
|
||||
base_time = time.time()
|
||||
for i in range(n_samples):
|
||||
t = i / self._rate
|
||||
sample = self._make_sample(base_time + t, t, i)
|
||||
samples.append(sample)
|
||||
return samples
|
||||
|
||||
# -- internals -----------------------------------------------------------
|
||||
|
||||
def _sample_loop(self) -> None:
|
||||
interval = 1.0 / self._rate
|
||||
while self._running:
|
||||
t0 = time.monotonic()
|
||||
now = time.time()
|
||||
t_offset = now - self._start_time
|
||||
sample = self._make_sample(now, t_offset, self._sample_index)
|
||||
self._buffer.append(sample)
|
||||
self._sample_index += 1
|
||||
elapsed = time.monotonic() - t0
|
||||
sleep_time = max(0.0, interval - elapsed)
|
||||
if sleep_time > 0:
|
||||
time.sleep(sleep_time)
|
||||
|
||||
def _make_sample(self, timestamp: float, t_offset: float, index: int) -> WifiSample:
|
||||
"""Build one deterministic sample."""
|
||||
# Sinusoidal component
|
||||
sine = self._sine_amp * math.sin(2.0 * math.pi * self._sine_freq * t_offset)
|
||||
|
||||
# Deterministic Gaussian noise (uses the seeded RNG)
|
||||
noise = self._rng.normal(0.0, self._noise_std)
|
||||
|
||||
# Step change
|
||||
step = 0.0
|
||||
if self._step_at is not None and t_offset >= self._step_at:
|
||||
step = self._step_dbm
|
||||
|
||||
rssi = self._baseline + sine + noise + step
|
||||
|
||||
return WifiSample(
|
||||
timestamp=timestamp,
|
||||
rssi_dbm=float(rssi),
|
||||
noise_dbm=-95.0,
|
||||
link_quality=max(0.0, min(1.0, (rssi + 100.0) / 60.0)),
|
||||
tx_bytes=index * 1500,
|
||||
rx_bytes=index * 3000,
|
||||
retry_count=max(0, index // 100),
|
||||
interface="sim0",
|
||||
)
|
||||
@@ -1,5 +1,9 @@
|
||||
"""
|
||||
Pose estimation service for WiFi-DensePose API
|
||||
Pose estimation service for WiFi-DensePose API.
|
||||
|
||||
Production paths in this module must NEVER use random data generation.
|
||||
All mock/synthetic data generation is isolated in src.testing and is only
|
||||
invoked when settings.mock_pose_data is explicitly True.
|
||||
"""
|
||||
|
||||
import logging
|
||||
@@ -266,97 +270,153 @@ class PoseService:
|
||||
return []
|
||||
|
||||
def _parse_pose_outputs(self, outputs: torch.Tensor) -> List[Dict[str, Any]]:
|
||||
"""Parse neural network outputs into pose detections."""
|
||||
"""Parse neural network outputs into pose detections.
|
||||
|
||||
Extracts confidence, keypoints, bounding boxes, and activity from model
|
||||
output tensors. The exact interpretation depends on the model architecture;
|
||||
this implementation assumes the DensePoseHead output format.
|
||||
|
||||
Args:
|
||||
outputs: Model output tensor of shape (batch, features).
|
||||
|
||||
Returns:
|
||||
List of pose detection dictionaries.
|
||||
"""
|
||||
poses = []
|
||||
|
||||
# This is a simplified parsing - in reality, this would depend on the model architecture
|
||||
# For now, generate mock poses based on the output shape
|
||||
batch_size = outputs.shape[0]
|
||||
|
||||
|
||||
for i in range(batch_size):
|
||||
# Extract pose information (mock implementation)
|
||||
confidence = float(torch.sigmoid(outputs[i, 0]).item()) if outputs.shape[1] > 0 else 0.5
|
||||
|
||||
output_i = outputs[i] if len(outputs.shape) > 1 else outputs
|
||||
|
||||
# Extract confidence from first output channel
|
||||
confidence = float(torch.sigmoid(output_i[0]).item()) if output_i.shape[0] > 0 else 0.0
|
||||
|
||||
# Extract keypoints from model output if available
|
||||
keypoints = self._extract_keypoints_from_output(output_i)
|
||||
|
||||
# Extract bounding box from model output if available
|
||||
bounding_box = self._extract_bbox_from_output(output_i)
|
||||
|
||||
# Classify activity from features
|
||||
activity = self._classify_activity(output_i)
|
||||
|
||||
pose = {
|
||||
"person_id": i,
|
||||
"confidence": confidence,
|
||||
"keypoints": self._generate_keypoints(),
|
||||
"bounding_box": self._generate_bounding_box(),
|
||||
"activity": self._classify_activity(outputs[i] if len(outputs.shape) > 1 else outputs),
|
||||
"timestamp": datetime.now().isoformat()
|
||||
"keypoints": keypoints,
|
||||
"bounding_box": bounding_box,
|
||||
"activity": activity,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
|
||||
poses.append(pose)
|
||||
|
||||
|
||||
return poses
|
||||
|
||||
def _generate_mock_poses(self) -> List[Dict[str, Any]]:
|
||||
"""Generate mock pose data for development."""
|
||||
import random
|
||||
|
||||
num_persons = random.randint(1, min(3, self.settings.pose_max_persons))
|
||||
poses = []
|
||||
|
||||
for i in range(num_persons):
|
||||
confidence = random.uniform(0.3, 0.95)
|
||||
|
||||
pose = {
|
||||
"person_id": i,
|
||||
"confidence": confidence,
|
||||
"keypoints": self._generate_keypoints(),
|
||||
"bounding_box": self._generate_bounding_box(),
|
||||
"activity": random.choice(["standing", "sitting", "walking", "lying"]),
|
||||
"timestamp": datetime.now().isoformat()
|
||||
}
|
||||
|
||||
poses.append(pose)
|
||||
|
||||
return poses
|
||||
|
||||
def _generate_keypoints(self) -> List[Dict[str, Any]]:
|
||||
"""Generate keypoints for a person."""
|
||||
import random
|
||||
|
||||
|
||||
def _extract_keypoints_from_output(self, output: torch.Tensor) -> List[Dict[str, Any]]:
|
||||
"""Extract keypoints from a single person's model output.
|
||||
|
||||
Attempts to decode keypoint coordinates from the output tensor.
|
||||
If the tensor does not contain enough data for full keypoints,
|
||||
returns keypoints with zero coordinates and confidence derived
|
||||
from available data.
|
||||
|
||||
Args:
|
||||
output: Single-person output tensor.
|
||||
|
||||
Returns:
|
||||
List of keypoint dictionaries.
|
||||
"""
|
||||
keypoint_names = [
|
||||
"nose", "left_eye", "right_eye", "left_ear", "right_ear",
|
||||
"left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
|
||||
"left_wrist", "right_wrist", "left_hip", "right_hip",
|
||||
"left_knee", "right_knee", "left_ankle", "right_ankle"
|
||||
"left_knee", "right_knee", "left_ankle", "right_ankle",
|
||||
]
|
||||
|
||||
|
||||
keypoints = []
|
||||
for name in keypoint_names:
|
||||
keypoints.append({
|
||||
"name": name,
|
||||
"x": random.uniform(0.1, 0.9),
|
||||
"y": random.uniform(0.1, 0.9),
|
||||
"confidence": random.uniform(0.5, 0.95)
|
||||
})
|
||||
|
||||
# Each keypoint needs 3 values: x, y, confidence
|
||||
# Skip first value (overall confidence), keypoints start at index 1
|
||||
kp_start = 1
|
||||
values_per_kp = 3
|
||||
total_kp_values = len(keypoint_names) * values_per_kp
|
||||
|
||||
if output.shape[0] >= kp_start + total_kp_values:
|
||||
kp_data = output[kp_start:kp_start + total_kp_values]
|
||||
for j, name in enumerate(keypoint_names):
|
||||
offset = j * values_per_kp
|
||||
x = float(torch.sigmoid(kp_data[offset]).item())
|
||||
y = float(torch.sigmoid(kp_data[offset + 1]).item())
|
||||
conf = float(torch.sigmoid(kp_data[offset + 2]).item())
|
||||
keypoints.append({"name": name, "x": x, "y": y, "confidence": conf})
|
||||
else:
|
||||
# Not enough output dimensions for full keypoints; return zeros
|
||||
for name in keypoint_names:
|
||||
keypoints.append({"name": name, "x": 0.0, "y": 0.0, "confidence": 0.0})
|
||||
|
||||
return keypoints
|
||||
|
||||
def _extract_bbox_from_output(self, output: torch.Tensor) -> Dict[str, float]:
|
||||
"""Extract bounding box from a single person's model output.
|
||||
|
||||
Looks for bbox values after the keypoint section. If not available,
|
||||
returns a zero bounding box.
|
||||
|
||||
Args:
|
||||
output: Single-person output tensor.
|
||||
|
||||
Returns:
|
||||
Bounding box dictionary with x, y, width, height.
|
||||
"""
|
||||
# Bounding box comes after: 1 (confidence) + 17*3 (keypoints) = 52
|
||||
bbox_start = 52
|
||||
if output.shape[0] >= bbox_start + 4:
|
||||
x = float(torch.sigmoid(output[bbox_start]).item())
|
||||
y = float(torch.sigmoid(output[bbox_start + 1]).item())
|
||||
w = float(torch.sigmoid(output[bbox_start + 2]).item())
|
||||
h = float(torch.sigmoid(output[bbox_start + 3]).item())
|
||||
return {"x": x, "y": y, "width": w, "height": h}
|
||||
else:
|
||||
return {"x": 0.0, "y": 0.0, "width": 0.0, "height": 0.0}
|
||||
|
||||
def _generate_bounding_box(self) -> Dict[str, float]:
|
||||
"""Generate bounding box for a person."""
|
||||
import random
|
||||
|
||||
x = random.uniform(0.1, 0.6)
|
||||
y = random.uniform(0.1, 0.6)
|
||||
width = random.uniform(0.2, 0.4)
|
||||
height = random.uniform(0.3, 0.5)
|
||||
|
||||
return {
|
||||
"x": x,
|
||||
"y": y,
|
||||
"width": width,
|
||||
"height": height
|
||||
}
|
||||
|
||||
def _generate_mock_poses(self) -> List[Dict[str, Any]]:
|
||||
"""Generate mock pose data for development.
|
||||
|
||||
Delegates to the testing module. Only callable when mock_pose_data is True.
|
||||
|
||||
Raises:
|
||||
NotImplementedError: If called without mock_pose_data enabled,
|
||||
indicating that real CSI data and trained models are required.
|
||||
"""
|
||||
if not self.settings.mock_pose_data:
|
||||
raise NotImplementedError(
|
||||
"Mock pose generation is disabled. Real pose estimation requires "
|
||||
"CSI data from configured hardware and trained model weights. "
|
||||
"Set mock_pose_data=True in settings for development, or provide "
|
||||
"real CSI input. See docs/hardware-setup.md."
|
||||
)
|
||||
from src.testing.mock_pose_generator import generate_mock_poses
|
||||
return generate_mock_poses(max_persons=self.settings.pose_max_persons)
|
||||
|
||||
def _classify_activity(self, features: torch.Tensor) -> str:
|
||||
"""Classify activity from features."""
|
||||
# Simple mock classification
|
||||
import random
|
||||
activities = ["standing", "sitting", "walking", "lying", "unknown"]
|
||||
return random.choice(activities)
|
||||
"""Classify activity from model features.
|
||||
|
||||
Uses the magnitude of the feature tensor to make a simple threshold-based
|
||||
classification. This is a basic heuristic; a proper activity classifier
|
||||
should be trained and loaded alongside the pose model.
|
||||
"""
|
||||
feature_norm = float(torch.norm(features).item())
|
||||
# Deterministic classification based on feature magnitude ranges
|
||||
if feature_norm > 2.0:
|
||||
return "walking"
|
||||
elif feature_norm > 1.0:
|
||||
return "standing"
|
||||
elif feature_norm > 0.5:
|
||||
return "sitting"
|
||||
elif feature_norm > 0.1:
|
||||
return "lying"
|
||||
else:
|
||||
return "unknown"
|
||||
|
||||
def _update_stats(self, poses: List[Dict[str, Any]], processing_time: float):
|
||||
"""Update processing statistics."""
|
||||
@@ -424,21 +484,56 @@ class PoseService:
|
||||
|
||||
# API endpoint methods
|
||||
async def estimate_poses(self, zone_ids=None, confidence_threshold=None, max_persons=None,
|
||||
include_keypoints=True, include_segmentation=False):
|
||||
"""Estimate poses with API parameters."""
|
||||
include_keypoints=True, include_segmentation=False,
|
||||
csi_data: Optional[np.ndarray] = None):
|
||||
"""Estimate poses with API parameters.
|
||||
|
||||
Args:
|
||||
zone_ids: List of zone identifiers to estimate poses for.
|
||||
confidence_threshold: Minimum confidence threshold for detections.
|
||||
max_persons: Maximum number of persons to return.
|
||||
include_keypoints: Whether to include keypoint data.
|
||||
include_segmentation: Whether to include segmentation masks.
|
||||
csi_data: Real CSI data array. Required when mock_pose_data is False.
|
||||
|
||||
Raises:
|
||||
NotImplementedError: If no CSI data is provided and mock mode is off.
|
||||
"""
|
||||
try:
|
||||
# Generate mock CSI data for estimation
|
||||
mock_csi = np.random.randn(64, 56, 3) # Mock CSI data
|
||||
if csi_data is None and not self.settings.mock_pose_data:
|
||||
raise NotImplementedError(
|
||||
"Pose estimation requires real CSI data input. No CSI data was provided "
|
||||
"and mock_pose_data is disabled. Either pass csi_data from hardware "
|
||||
"collection, or enable mock_pose_data for development. "
|
||||
"See docs/hardware-setup.md for CSI data collection setup."
|
||||
)
|
||||
|
||||
metadata = {
|
||||
"timestamp": datetime.now(),
|
||||
"zone_ids": zone_ids or ["zone_1"],
|
||||
"confidence_threshold": confidence_threshold or self.settings.pose_confidence_threshold,
|
||||
"max_persons": max_persons or self.settings.pose_max_persons
|
||||
"max_persons": max_persons or self.settings.pose_max_persons,
|
||||
}
|
||||
|
||||
# Process the data
|
||||
result = await self.process_csi_data(mock_csi, metadata)
|
||||
|
||||
|
||||
if csi_data is not None:
|
||||
# Process real CSI data
|
||||
result = await self.process_csi_data(csi_data, metadata)
|
||||
else:
|
||||
# Mock mode: generate mock poses directly (no fake CSI data)
|
||||
from src.testing.mock_pose_generator import generate_mock_poses
|
||||
start_time = datetime.now()
|
||||
mock_poses = generate_mock_poses(
|
||||
max_persons=max_persons or self.settings.pose_max_persons
|
||||
)
|
||||
processing_time = (datetime.now() - start_time).total_seconds() * 1000
|
||||
result = {
|
||||
"timestamp": start_time.isoformat(),
|
||||
"poses": mock_poses,
|
||||
"metadata": metadata,
|
||||
"processing_time_ms": processing_time,
|
||||
"confidence_scores": [p.get("confidence", 0.0) for p in mock_poses],
|
||||
}
|
||||
|
||||
# Format for API response
|
||||
persons = []
|
||||
for i, pose in enumerate(result["poses"]):
|
||||
@@ -448,31 +543,33 @@ class PoseService:
|
||||
"bounding_box": pose["bounding_box"],
|
||||
"zone_id": zone_ids[0] if zone_ids else "zone_1",
|
||||
"activity": pose["activity"],
|
||||
"timestamp": datetime.fromisoformat(pose["timestamp"])
|
||||
"timestamp": datetime.fromisoformat(pose["timestamp"]) if isinstance(pose["timestamp"], str) else pose["timestamp"],
|
||||
}
|
||||
|
||||
|
||||
if include_keypoints:
|
||||
person["keypoints"] = pose["keypoints"]
|
||||
|
||||
if include_segmentation:
|
||||
|
||||
if include_segmentation and not self.settings.mock_pose_data:
|
||||
person["segmentation"] = {"mask": "real_segmentation_data"}
|
||||
elif include_segmentation:
|
||||
person["segmentation"] = {"mask": "mock_segmentation_data"}
|
||||
|
||||
|
||||
persons.append(person)
|
||||
|
||||
|
||||
# Zone summary
|
||||
zone_summary = {}
|
||||
for zone_id in (zone_ids or ["zone_1"]):
|
||||
zone_summary[zone_id] = len([p for p in persons if p.get("zone_id") == zone_id])
|
||||
|
||||
|
||||
return {
|
||||
"timestamp": datetime.now(),
|
||||
"frame_id": f"frame_{int(datetime.now().timestamp())}",
|
||||
"persons": persons,
|
||||
"zone_summary": zone_summary,
|
||||
"processing_time_ms": result["processing_time_ms"],
|
||||
"metadata": {"mock_data": self.settings.mock_pose_data}
|
||||
"metadata": {"mock_data": self.settings.mock_pose_data},
|
||||
}
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error in estimate_poses: {e}")
|
||||
raise
|
||||
@@ -484,132 +581,105 @@ class PoseService:
|
||||
include_keypoints, include_segmentation)
|
||||
|
||||
async def get_zone_occupancy(self, zone_id: str):
|
||||
"""Get current occupancy for a specific zone."""
|
||||
"""Get current occupancy for a specific zone.
|
||||
|
||||
In mock mode, delegates to testing module. In production mode, returns
|
||||
data based on actual pose estimation results or reports no data available.
|
||||
"""
|
||||
try:
|
||||
# Mock occupancy data
|
||||
import random
|
||||
count = random.randint(0, 5)
|
||||
persons = []
|
||||
|
||||
for i in range(count):
|
||||
persons.append({
|
||||
"person_id": f"person_{i}",
|
||||
"confidence": random.uniform(0.7, 0.95),
|
||||
"activity": random.choice(["standing", "sitting", "walking"])
|
||||
})
|
||||
|
||||
if self.settings.mock_pose_data:
|
||||
from src.testing.mock_pose_generator import generate_mock_zone_occupancy
|
||||
return generate_mock_zone_occupancy(zone_id)
|
||||
|
||||
# Production: no real-time occupancy data without active CSI stream
|
||||
return {
|
||||
"count": count,
|
||||
"count": 0,
|
||||
"max_occupancy": 10,
|
||||
"persons": persons,
|
||||
"timestamp": datetime.now()
|
||||
"persons": [],
|
||||
"timestamp": datetime.now(),
|
||||
"note": "No real-time CSI data available. Connect hardware to get live occupancy.",
|
||||
}
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting zone occupancy: {e}")
|
||||
return None
|
||||
|
||||
async def get_zones_summary(self):
|
||||
"""Get occupancy summary for all zones."""
|
||||
"""Get occupancy summary for all zones.
|
||||
|
||||
In mock mode, delegates to testing module. In production, returns
|
||||
empty zones until real CSI data is being processed.
|
||||
"""
|
||||
try:
|
||||
import random
|
||||
if self.settings.mock_pose_data:
|
||||
from src.testing.mock_pose_generator import generate_mock_zones_summary
|
||||
return generate_mock_zones_summary()
|
||||
|
||||
# Production: no real-time data without active CSI stream
|
||||
zones = ["zone_1", "zone_2", "zone_3", "zone_4"]
|
||||
zone_data = {}
|
||||
total_persons = 0
|
||||
active_zones = 0
|
||||
|
||||
for zone_id in zones:
|
||||
count = random.randint(0, 3)
|
||||
zone_data[zone_id] = {
|
||||
"occupancy": count,
|
||||
"occupancy": 0,
|
||||
"max_occupancy": 10,
|
||||
"status": "active" if count > 0 else "inactive"
|
||||
"status": "inactive",
|
||||
}
|
||||
total_persons += count
|
||||
if count > 0:
|
||||
active_zones += 1
|
||||
|
||||
|
||||
return {
|
||||
"total_persons": total_persons,
|
||||
"total_persons": 0,
|
||||
"zones": zone_data,
|
||||
"active_zones": active_zones
|
||||
"active_zones": 0,
|
||||
"note": "No real-time CSI data available. Connect hardware to get live occupancy.",
|
||||
}
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting zones summary: {e}")
|
||||
raise
|
||||
|
||||
async def get_historical_data(self, start_time, end_time, zone_ids=None,
|
||||
aggregation_interval=300, include_raw_data=False):
|
||||
"""Get historical pose estimation data."""
|
||||
"""Get historical pose estimation data.
|
||||
|
||||
In mock mode, delegates to testing module. In production, returns
|
||||
empty data indicating no historical records are stored yet.
|
||||
"""
|
||||
try:
|
||||
# Mock historical data
|
||||
import random
|
||||
from datetime import timedelta
|
||||
|
||||
current_time = start_time
|
||||
aggregated_data = []
|
||||
raw_data = [] if include_raw_data else None
|
||||
|
||||
while current_time < end_time:
|
||||
# Generate aggregated data point
|
||||
data_point = {
|
||||
"timestamp": current_time,
|
||||
"total_persons": random.randint(0, 8),
|
||||
"zones": {}
|
||||
}
|
||||
|
||||
for zone_id in (zone_ids or ["zone_1", "zone_2", "zone_3"]):
|
||||
data_point["zones"][zone_id] = {
|
||||
"occupancy": random.randint(0, 3),
|
||||
"avg_confidence": random.uniform(0.7, 0.95)
|
||||
}
|
||||
|
||||
aggregated_data.append(data_point)
|
||||
|
||||
# Generate raw data if requested
|
||||
if include_raw_data:
|
||||
for _ in range(random.randint(0, 5)):
|
||||
raw_data.append({
|
||||
"timestamp": current_time + timedelta(seconds=random.randint(0, aggregation_interval)),
|
||||
"person_id": f"person_{random.randint(1, 10)}",
|
||||
"zone_id": random.choice(zone_ids or ["zone_1", "zone_2", "zone_3"]),
|
||||
"confidence": random.uniform(0.5, 0.95),
|
||||
"activity": random.choice(["standing", "sitting", "walking"])
|
||||
})
|
||||
|
||||
current_time += timedelta(seconds=aggregation_interval)
|
||||
|
||||
if self.settings.mock_pose_data:
|
||||
from src.testing.mock_pose_generator import generate_mock_historical_data
|
||||
return generate_mock_historical_data(
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
zone_ids=zone_ids,
|
||||
aggregation_interval=aggregation_interval,
|
||||
include_raw_data=include_raw_data,
|
||||
)
|
||||
|
||||
# Production: no historical data without a persistence backend
|
||||
return {
|
||||
"aggregated_data": aggregated_data,
|
||||
"raw_data": raw_data,
|
||||
"total_records": len(aggregated_data)
|
||||
"aggregated_data": [],
|
||||
"raw_data": [] if include_raw_data else None,
|
||||
"total_records": 0,
|
||||
"note": "No historical data available. A data persistence backend must be configured to store historical records.",
|
||||
}
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting historical data: {e}")
|
||||
raise
|
||||
|
||||
async def get_recent_activities(self, zone_id=None, limit=10):
|
||||
"""Get recently detected activities."""
|
||||
"""Get recently detected activities.
|
||||
|
||||
In mock mode, delegates to testing module. In production, returns
|
||||
empty list indicating no activity data has been recorded yet.
|
||||
"""
|
||||
try:
|
||||
import random
|
||||
activities = []
|
||||
|
||||
for i in range(limit):
|
||||
activity = {
|
||||
"activity_id": f"activity_{i}",
|
||||
"person_id": f"person_{random.randint(1, 5)}",
|
||||
"zone_id": zone_id or random.choice(["zone_1", "zone_2", "zone_3"]),
|
||||
"activity": random.choice(["standing", "sitting", "walking", "lying"]),
|
||||
"confidence": random.uniform(0.6, 0.95),
|
||||
"timestamp": datetime.now() - timedelta(minutes=random.randint(0, 60)),
|
||||
"duration_seconds": random.randint(10, 300)
|
||||
}
|
||||
activities.append(activity)
|
||||
|
||||
return activities
|
||||
|
||||
if self.settings.mock_pose_data:
|
||||
from src.testing.mock_pose_generator import generate_mock_recent_activities
|
||||
return generate_mock_recent_activities(zone_id=zone_id, limit=limit)
|
||||
|
||||
# Production: no activity records without an active CSI stream
|
||||
return []
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting recent activities: {e}")
|
||||
raise
|
||||
|
||||
301
v1/src/testing/mock_pose_generator.py
Normal file
301
v1/src/testing/mock_pose_generator.py
Normal file
@@ -0,0 +1,301 @@
|
||||
"""
|
||||
Mock pose data generator for testing and development.
|
||||
|
||||
This module provides synthetic pose estimation data for use in development
|
||||
and testing environments ONLY. The generated data mimics realistic human
|
||||
pose detection outputs including keypoints, bounding boxes, and activities.
|
||||
|
||||
WARNING: This module uses random number generation intentionally for test data.
|
||||
Do NOT use this module in production data paths.
|
||||
"""
|
||||
|
||||
import random
|
||||
import logging
|
||||
from typing import Dict, List, Any, Optional
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Banner displayed when mock pose mode is active
|
||||
MOCK_POSE_BANNER = """
|
||||
================================================================================
|
||||
WARNING: MOCK POSE MODE ACTIVE - Using synthetic pose data
|
||||
|
||||
All pose detections are randomly generated and do NOT represent real humans.
|
||||
For real pose estimation, provide trained model weights and real CSI data.
|
||||
See docs/hardware-setup.md for configuration instructions.
|
||||
================================================================================
|
||||
"""
|
||||
|
||||
_banner_shown = False
|
||||
|
||||
|
||||
def _show_banner() -> None:
|
||||
"""Display the mock pose mode warning banner (once per session)."""
|
||||
global _banner_shown
|
||||
if not _banner_shown:
|
||||
logger.warning(MOCK_POSE_BANNER)
|
||||
_banner_shown = True
|
||||
|
||||
|
||||
def generate_mock_keypoints() -> List[Dict[str, Any]]:
|
||||
"""Generate mock keypoints for a single person.
|
||||
|
||||
Returns:
|
||||
List of 17 COCO-format keypoint dictionaries with name, x, y, confidence.
|
||||
"""
|
||||
keypoint_names = [
|
||||
"nose", "left_eye", "right_eye", "left_ear", "right_ear",
|
||||
"left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
|
||||
"left_wrist", "right_wrist", "left_hip", "right_hip",
|
||||
"left_knee", "right_knee", "left_ankle", "right_ankle",
|
||||
]
|
||||
|
||||
keypoints = []
|
||||
for name in keypoint_names:
|
||||
keypoints.append({
|
||||
"name": name,
|
||||
"x": random.uniform(0.1, 0.9),
|
||||
"y": random.uniform(0.1, 0.9),
|
||||
"confidence": random.uniform(0.5, 0.95),
|
||||
})
|
||||
|
||||
return keypoints
|
||||
|
||||
|
||||
def generate_mock_bounding_box() -> Dict[str, float]:
|
||||
"""Generate a mock bounding box for a single person.
|
||||
|
||||
Returns:
|
||||
Dictionary with x, y, width, height as normalized coordinates.
|
||||
"""
|
||||
x = random.uniform(0.1, 0.6)
|
||||
y = random.uniform(0.1, 0.6)
|
||||
width = random.uniform(0.2, 0.4)
|
||||
height = random.uniform(0.3, 0.5)
|
||||
|
||||
return {"x": x, "y": y, "width": width, "height": height}
|
||||
|
||||
|
||||
def generate_mock_poses(max_persons: int = 3) -> List[Dict[str, Any]]:
|
||||
"""Generate mock pose detections for testing.
|
||||
|
||||
Args:
|
||||
max_persons: Maximum number of persons to generate (1 to max_persons).
|
||||
|
||||
Returns:
|
||||
List of pose detection dictionaries.
|
||||
"""
|
||||
_show_banner()
|
||||
|
||||
num_persons = random.randint(1, min(3, max_persons))
|
||||
poses = []
|
||||
|
||||
for i in range(num_persons):
|
||||
confidence = random.uniform(0.3, 0.95)
|
||||
|
||||
pose = {
|
||||
"person_id": i,
|
||||
"confidence": confidence,
|
||||
"keypoints": generate_mock_keypoints(),
|
||||
"bounding_box": generate_mock_bounding_box(),
|
||||
"activity": random.choice(["standing", "sitting", "walking", "lying"]),
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
poses.append(pose)
|
||||
|
||||
return poses
|
||||
|
||||
|
||||
def generate_mock_zone_occupancy(zone_id: str) -> Dict[str, Any]:
|
||||
"""Generate mock zone occupancy data.
|
||||
|
||||
Args:
|
||||
zone_id: Zone identifier.
|
||||
|
||||
Returns:
|
||||
Dictionary with occupancy count and person details.
|
||||
"""
|
||||
_show_banner()
|
||||
|
||||
count = random.randint(0, 5)
|
||||
persons = []
|
||||
|
||||
for i in range(count):
|
||||
persons.append({
|
||||
"person_id": f"person_{i}",
|
||||
"confidence": random.uniform(0.7, 0.95),
|
||||
"activity": random.choice(["standing", "sitting", "walking"]),
|
||||
})
|
||||
|
||||
return {
|
||||
"count": count,
|
||||
"max_occupancy": 10,
|
||||
"persons": persons,
|
||||
"timestamp": datetime.now(),
|
||||
}
|
||||
|
||||
|
||||
def generate_mock_zones_summary(
|
||||
zone_ids: Optional[List[str]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""Generate mock zones summary data.
|
||||
|
||||
Args:
|
||||
zone_ids: List of zone identifiers. Defaults to zone_1 through zone_4.
|
||||
|
||||
Returns:
|
||||
Dictionary with per-zone occupancy and aggregate counts.
|
||||
"""
|
||||
_show_banner()
|
||||
|
||||
zones = zone_ids or ["zone_1", "zone_2", "zone_3", "zone_4"]
|
||||
zone_data = {}
|
||||
total_persons = 0
|
||||
active_zones = 0
|
||||
|
||||
for zone_id in zones:
|
||||
count = random.randint(0, 3)
|
||||
zone_data[zone_id] = {
|
||||
"occupancy": count,
|
||||
"max_occupancy": 10,
|
||||
"status": "active" if count > 0 else "inactive",
|
||||
}
|
||||
total_persons += count
|
||||
if count > 0:
|
||||
active_zones += 1
|
||||
|
||||
return {
|
||||
"total_persons": total_persons,
|
||||
"zones": zone_data,
|
||||
"active_zones": active_zones,
|
||||
}
|
||||
|
||||
|
||||
def generate_mock_historical_data(
|
||||
start_time: datetime,
|
||||
end_time: datetime,
|
||||
zone_ids: Optional[List[str]] = None,
|
||||
aggregation_interval: int = 300,
|
||||
include_raw_data: bool = False,
|
||||
) -> Dict[str, Any]:
|
||||
"""Generate mock historical pose data.
|
||||
|
||||
Args:
|
||||
start_time: Start of the time range.
|
||||
end_time: End of the time range.
|
||||
zone_ids: Zones to include. Defaults to zone_1, zone_2, zone_3.
|
||||
aggregation_interval: Seconds between data points.
|
||||
include_raw_data: Whether to include simulated raw detections.
|
||||
|
||||
Returns:
|
||||
Dictionary with aggregated_data, optional raw_data, and total_records.
|
||||
"""
|
||||
_show_banner()
|
||||
|
||||
zones = zone_ids or ["zone_1", "zone_2", "zone_3"]
|
||||
current_time = start_time
|
||||
aggregated_data = []
|
||||
raw_data = [] if include_raw_data else None
|
||||
|
||||
while current_time < end_time:
|
||||
data_point = {
|
||||
"timestamp": current_time,
|
||||
"total_persons": random.randint(0, 8),
|
||||
"zones": {},
|
||||
}
|
||||
|
||||
for zone_id in zones:
|
||||
data_point["zones"][zone_id] = {
|
||||
"occupancy": random.randint(0, 3),
|
||||
"avg_confidence": random.uniform(0.7, 0.95),
|
||||
}
|
||||
|
||||
aggregated_data.append(data_point)
|
||||
|
||||
if include_raw_data:
|
||||
for _ in range(random.randint(0, 5)):
|
||||
raw_data.append({
|
||||
"timestamp": current_time + timedelta(seconds=random.randint(0, aggregation_interval)),
|
||||
"person_id": f"person_{random.randint(1, 10)}",
|
||||
"zone_id": random.choice(zones),
|
||||
"confidence": random.uniform(0.5, 0.95),
|
||||
"activity": random.choice(["standing", "sitting", "walking"]),
|
||||
})
|
||||
|
||||
current_time += timedelta(seconds=aggregation_interval)
|
||||
|
||||
return {
|
||||
"aggregated_data": aggregated_data,
|
||||
"raw_data": raw_data,
|
||||
"total_records": len(aggregated_data),
|
||||
}
|
||||
|
||||
|
||||
def generate_mock_recent_activities(
|
||||
zone_id: Optional[str] = None,
|
||||
limit: int = 10,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Generate mock recent activity data.
|
||||
|
||||
Args:
|
||||
zone_id: Optional zone filter. If None, random zones are used.
|
||||
limit: Number of activities to generate.
|
||||
|
||||
Returns:
|
||||
List of activity dictionaries.
|
||||
"""
|
||||
_show_banner()
|
||||
|
||||
activities = []
|
||||
|
||||
for i in range(limit):
|
||||
activity = {
|
||||
"activity_id": f"activity_{i}",
|
||||
"person_id": f"person_{random.randint(1, 5)}",
|
||||
"zone_id": zone_id or random.choice(["zone_1", "zone_2", "zone_3"]),
|
||||
"activity": random.choice(["standing", "sitting", "walking", "lying"]),
|
||||
"confidence": random.uniform(0.6, 0.95),
|
||||
"timestamp": datetime.now() - timedelta(minutes=random.randint(0, 60)),
|
||||
"duration_seconds": random.randint(10, 300),
|
||||
}
|
||||
activities.append(activity)
|
||||
|
||||
return activities
|
||||
|
||||
|
||||
def generate_mock_statistics(
|
||||
start_time: datetime,
|
||||
end_time: datetime,
|
||||
) -> Dict[str, Any]:
|
||||
"""Generate mock pose estimation statistics.
|
||||
|
||||
Args:
|
||||
start_time: Start of the statistics period.
|
||||
end_time: End of the statistics period.
|
||||
|
||||
Returns:
|
||||
Dictionary with detection counts, rates, and distributions.
|
||||
"""
|
||||
_show_banner()
|
||||
|
||||
total_detections = random.randint(100, 1000)
|
||||
successful_detections = int(total_detections * random.uniform(0.8, 0.95))
|
||||
|
||||
return {
|
||||
"total_detections": total_detections,
|
||||
"successful_detections": successful_detections,
|
||||
"failed_detections": total_detections - successful_detections,
|
||||
"success_rate": successful_detections / total_detections,
|
||||
"average_confidence": random.uniform(0.75, 0.90),
|
||||
"average_processing_time_ms": random.uniform(50, 200),
|
||||
"unique_persons": random.randint(5, 20),
|
||||
"most_active_zone": random.choice(["zone_1", "zone_2", "zone_3"]),
|
||||
"activity_distribution": {
|
||||
"standing": random.uniform(0.3, 0.5),
|
||||
"sitting": random.uniform(0.2, 0.4),
|
||||
"walking": random.uniform(0.1, 0.3),
|
||||
"lying": random.uniform(0.0, 0.1),
|
||||
},
|
||||
}
|
||||
Reference in New Issue
Block a user