updates
This commit is contained in:
398
scripts/validate-deployment.sh
Executable file
398
scripts/validate-deployment.sh
Executable file
@@ -0,0 +1,398 @@
|
||||
#!/bin/bash

# WiFi-DensePose Deployment Validation Script
# This script validates that all deployment components are functioning correctly
#
# Requires: kubectl configured for the target cluster; curl for health probes.

set -euo pipefail

# Configuration — marked readonly so later code cannot clobber them.
readonly NAMESPACE="wifi-densepose"
readonly MONITORING_NAMESPACE="monitoring"
readonly TIMEOUT=300  # seconds to wait for a deployment to become available

# Colors for output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color
|
||||
|
||||
# Logging helpers: each prints a colour-tagged message to stdout.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }

log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }

log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }

log_error()   { echo -e "${RED}[ERROR]${NC} $1"; }
|
||||
|
||||
# Verify kubectl exists on PATH and can reach the cluster.
# Returns 0 when both hold, 1 otherwise (after logging the reason).
check_kubectl() {
    log_info "Checking kubectl configuration..."

    command -v kubectl &> /dev/null || {
        log_error "kubectl is not installed or not in PATH"
        return 1
    }

    kubectl cluster-info &> /dev/null || {
        log_error "kubectl is not configured or cluster is not accessible"
        return 1
    }

    log_success "kubectl is configured and cluster is accessible"
}
|
||||
|
||||
# Check that a namespace exists in the cluster.
# $1 - namespace name
# Returns 0 if present, 1 otherwise.
validate_namespace() {
    local ns=$1
    log_info "Validating namespace: $ns"

    if ! kubectl get namespace "$ns" &> /dev/null; then
        log_error "Namespace $ns does not exist"
        return 1
    fi
    log_success "Namespace $ns exists"
}
|
||||
|
||||
# Verify every deployment in $NAMESPACE becomes available within $TIMEOUT
# seconds and has all desired replicas ready.
# Returns 0 if all deployments are healthy, 1 otherwise.
validate_deployments() {
    log_info "Validating deployments in namespace: $NAMESPACE"

    local deployments
    deployments=$(kubectl get deployments -n "$NAMESPACE" -o jsonpath='{.items[*].metadata.name}')

    if [ -z "$deployments" ]; then
        log_warning "No deployments found in namespace $NAMESPACE"
        return 1
    fi

    local failed=0
    local deployment ready_replicas desired_replicas
    for deployment in $deployments; do
        log_info "Checking deployment: $deployment"

        if kubectl wait --for=condition=available --timeout="${TIMEOUT}s" "deployment/$deployment" -n "$NAMESPACE" &> /dev/null; then
            ready_replicas=$(kubectl get deployment "$deployment" -n "$NAMESPACE" -o jsonpath='{.status.readyReplicas}')
            desired_replicas=$(kubectl get deployment "$deployment" -n "$NAMESPACE" -o jsonpath='{.spec.replicas}')
            # .status.readyReplicas is absent (empty string) when no replicas
            # are ready yet; default both counts to 0 so the comparison and
            # the log message stay meaningful.
            ready_replicas=${ready_replicas:-0}
            desired_replicas=${desired_replicas:-0}

            if [ "$ready_replicas" = "$desired_replicas" ]; then
                log_success "Deployment $deployment is ready ($ready_replicas/$desired_replicas replicas)"
            else
                log_warning "Deployment $deployment has $ready_replicas/$desired_replicas replicas ready"
                failed=1
            fi
        else
            log_error "Deployment $deployment is not ready within ${TIMEOUT}s"
            failed=1
        fi
    done

    return $failed
}
|
||||
|
||||
# Ensure every service in $NAMESPACE has at least one ready endpoint address.
# Returns 0 when all services have endpoints, 1 otherwise.
validate_services() {
    log_info "Validating services in namespace: $NAMESPACE"

    local services
    services=$(kubectl get services -n "$NAMESPACE" -o jsonpath='{.items[*].metadata.name}')

    if [ -z "$services" ]; then
        log_warning "No services found in namespace $NAMESPACE"
        return 1
    fi

    local failed=0
    local service endpoints
    for service in $services; do
        log_info "Checking service: $service"

        endpoints=$(kubectl get endpoints "$service" -n "$NAMESPACE" -o jsonpath='{.subsets[*].addresses[*].ip}')

        if [ -z "$endpoints" ]; then
            log_error "Service $service has no endpoints"
            failed=1
        else
            log_success "Service $service has endpoints: $endpoints"
        fi
    done

    return $failed
}
|
||||
|
||||
# Check every ingress in $NAMESPACE for configured hosts and an assigned
# load-balancer address. Missing hosts fail the check; a missing address only
# warns. Returns 0 when no ingress exists at all (ingress is optional).
validate_ingress() {
    log_info "Validating ingress configuration in namespace: $NAMESPACE"

    local ingresses
    ingresses=$(kubectl get ingress -n "$NAMESPACE" -o jsonpath='{.items[*].metadata.name}')

    if [ -z "$ingresses" ]; then
        log_warning "No ingress resources found in namespace $NAMESPACE"
        return 0
    fi

    local failed=0
    local ingress hosts address
    for ingress in $ingresses; do
        log_info "Checking ingress: $ingress"

        hosts=$(kubectl get ingress "$ingress" -n "$NAMESPACE" -o jsonpath='{.spec.rules[*].host}')

        if [ -z "$hosts" ]; then
            log_error "Ingress $ingress has no configured hosts"
            failed=1
            continue
        fi

        log_success "Ingress $ingress configured for hosts: $hosts"

        # Check if ingress has an IP/hostname assigned
        address=$(kubectl get ingress "$ingress" -n "$NAMESPACE" -o jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}')

        if [ -n "$address" ]; then
            log_success "Ingress $ingress has address: $address"
        else
            log_warning "Ingress $ingress does not have an assigned address yet"
        fi
    done

    return $failed
}
|
||||
|
||||
# Report the ConfigMaps and (non-default-token) Secrets present in $NAMESPACE.
# Informational only -- always returns 0.
validate_config() {
    log_info "Validating ConfigMaps and Secrets in namespace: $NAMESPACE"

    # Check ConfigMaps
    local configmaps
    configmaps=$(kubectl get configmaps -n "$NAMESPACE" -o jsonpath='{.items[*].metadata.name}')

    if [ -n "$configmaps" ]; then
        log_success "ConfigMaps found: $configmaps"
    else
        log_warning "No ConfigMaps found in namespace $NAMESPACE"
    fi

    # Check Secrets. grep -v exits 1 when it filters every line out, which
    # would make the assignment fail under `set -euo pipefail` if this
    # function is ever called outside an `if` context, so the pipeline is
    # explicitly allowed to produce an empty result.
    local secrets
    secrets=$(kubectl get secrets -n "$NAMESPACE" -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | grep -v "default-token" | tr '\n' ' ') || true

    if [ -n "$secrets" ]; then
        log_success "Secrets found: $secrets"
    else
        log_warning "No custom secrets found in namespace $NAMESPACE"
    fi

    return 0
}
|
||||
|
||||
# Report current vs desired replica counts for each HPA in $NAMESPACE.
# Informational only: missing metrics merely warn. Always returns 0 (the
# original carried a `failed` flag that was never set -- dead code removed).
validate_hpa() {
    log_info "Validating Horizontal Pod Autoscaler in namespace: $NAMESPACE"

    local hpas
    hpas=$(kubectl get hpa -n "$NAMESPACE" -o jsonpath='{.items[*].metadata.name}')

    if [ -z "$hpas" ]; then
        log_warning "No HPA resources found in namespace $NAMESPACE"
        return 0
    fi

    local hpa current_replicas desired_replicas
    for hpa in $hpas; do
        log_info "Checking HPA: $hpa"

        current_replicas=$(kubectl get hpa "$hpa" -n "$NAMESPACE" -o jsonpath='{.status.currentReplicas}')
        desired_replicas=$(kubectl get hpa "$hpa" -n "$NAMESPACE" -o jsonpath='{.status.desiredReplicas}')

        if [ -n "$current_replicas" ] && [ -n "$desired_replicas" ]; then
            log_success "HPA $hpa: current=$current_replicas, desired=$desired_replicas"
        else
            log_warning "HPA $hpa metrics not available yet"
        fi
    done

    return 0
}
|
||||
|
||||
# Probe /health on each application pod through a temporary port-forward.
# Returns 0 when every pod answers, 1 otherwise.
test_health_endpoints() {
    log_info "Testing application health endpoints..."

    # Get application pods
    local pods
    pods=$(kubectl get pods -n "$NAMESPACE" -l app=wifi-densepose -o jsonpath='{.items[*].metadata.name}')

    if [ -z "$pods" ]; then
        log_error "No application pods found"
        return 1
    fi

    local failed=0
    local pod pf_pid attempt ok
    for pod in $pods; do
        log_info "Testing health endpoint for pod: $pod"

        kubectl port-forward "pod/$pod" 8080:8080 -n "$NAMESPACE" &> /dev/null &
        pf_pid=$!

        # Poll instead of relying on a single fixed sleep: the tunnel can
        # take a variable amount of time to come up, and a curl timeout
        # keeps a wedged pod from hanging the whole script.
        ok=0
        for attempt in 1 2 3 4 5; do
            sleep 1
            if curl -sf --max-time 5 http://localhost:8080/health &> /dev/null; then
                ok=1
                break
            fi
        done

        if [ "$ok" -eq 1 ]; then
            log_success "Health endpoint for pod $pod is responding"
        else
            log_error "Health endpoint for pod $pod is not responding"
            failed=1
        fi

        kill "$pf_pid" 2>/dev/null || true
        # Reap the port-forward so it cannot linger as a zombie or print
        # after the script moves on (the original only slept).
        wait "$pf_pid" 2>/dev/null || true
    done

    return $failed
}
|
||||
|
||||
# Check the Prometheus and Grafana deployments in the monitoring namespace.
# Missing or unready components are reported but never fail the check
# (always returns 0).
validate_monitoring() {
    log_info "Validating monitoring stack in namespace: $MONITORING_NAMESPACE"

    if ! validate_namespace "$MONITORING_NAMESPACE"; then
        log_warning "Monitoring namespace not found, skipping monitoring validation"
        return 0
    fi

    local name label
    for name in prometheus-server grafana; do
        case "$name" in
            prometheus-server) label="Prometheus" ;;
            grafana)           label="Grafana" ;;
        esac

        if ! kubectl get deployment "$name" -n "$MONITORING_NAMESPACE" &> /dev/null; then
            log_warning "$label deployment not found"
            continue
        fi

        if kubectl wait --for=condition=available --timeout=60s "deployment/$name" -n "$MONITORING_NAMESPACE" &> /dev/null; then
            log_success "$label is running"
        else
            log_error "$label is not ready"
        fi
    done

    return 0
}
|
||||
|
||||
# Report the readiness of the Fluentd DaemonSet in kube-system.
# Informational only -- always returns 0.
validate_logging() {
    log_info "Validating logging stack..."

    if ! kubectl get daemonset fluentd -n kube-system &> /dev/null; then
        log_warning "Fluentd DaemonSet not found"
        return 0
    fi

    local desired ready
    desired=$(kubectl get daemonset fluentd -n kube-system -o jsonpath='{.status.desiredNumberScheduled}')
    ready=$(kubectl get daemonset fluentd -n kube-system -o jsonpath='{.status.numberReady}')

    if [ "$desired" = "$ready" ]; then
        log_success "Fluentd DaemonSet is ready ($ready/$desired nodes)"
    else
        log_warning "Fluentd DaemonSet has $ready/$desired pods ready"
    fi

    return 0
}
|
||||
|
||||
# Print cluster and namespace resource usage via `kubectl top`; degrades to
# warnings when the metrics server is unavailable. Always returns 0.
check_resource_usage() {
    log_info "Checking resource usage..."

    log_info "Node resource usage:"
    if ! kubectl top nodes 2>/dev/null; then
        log_warning "Metrics server not available for node metrics"
    fi

    log_info "Pod resource usage in namespace $NAMESPACE:"
    if ! kubectl top pods -n "$NAMESPACE" 2>/dev/null; then
        log_warning "Metrics server not available for pod metrics"
    fi

    return 0
}
|
||||
|
||||
# Print a summary of validation results.
# $1 - total number of checks run
# $2 - number of failed checks
# Returns 0 when nothing failed, 1 otherwise.
generate_report() {
    local total_checks=$1
    local failed_checks=$2
    local passed_checks=$((total_checks - failed_checks))

    echo ""
    log_info "=== Deployment Validation Report ==="
    echo "Total checks: $total_checks"
    echo "Passed: $passed_checks"
    echo "Failed: $failed_checks"

    # Quoted so an empty/garbled argument fails loudly rather than making
    # `[` misparse the expression.
    if [ "$failed_checks" -eq 0 ]; then
        log_success "All validation checks passed! 🎉"
        return 0
    else
        log_error "Some validation checks failed. Please review the output above."
        return 1
    fi
}
|
||||
|
||||
# Run every validation check, count failures, and emit the final report.
# The report's exit status (non-zero on any failure) becomes the script's.
main() {
    log_info "Starting WiFi-DensePose deployment validation..."

    local total_checks=0
    local failed_checks=0

    # Each entry is a command plus optional arguments, executed below via
    # deliberate word-splitting -- this avoids `eval` on constructed strings.
    local checks=(
        "check_kubectl"
        "validate_namespace $NAMESPACE"
        "validate_deployments"
        "validate_services"
        "validate_ingress"
        "validate_config"
        "validate_hpa"
        "test_health_endpoints"
        "validate_monitoring"
        "validate_logging"
        "check_resource_usage"
    )

    local check
    for check in "${checks[@]}"; do
        total_checks=$((total_checks + 1))
        echo ""
        # shellcheck disable=SC2086 -- word-splitting is intended here
        if ! $check; then
            failed_checks=$((failed_checks + 1))
        fi
    done

    # Generate final report
    generate_report "$total_checks" "$failed_checks"
}
|
||||
|
||||
# Entry point: forward all CLI arguments to main.
main "$@"
|
||||
458
scripts/validate-integration.sh
Executable file
458
scripts/validate-integration.sh
Executable file
@@ -0,0 +1,458 @@
|
||||
#!/bin/bash

# WiFi-DensePose Integration Validation Script
# This script validates the complete system integration

set -e # Exit on any error

# Colors for output (readonly: constants must not be clobbered later)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Configuration — derived once at startup, then frozen.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
readonly SCRIPT_DIR PROJECT_ROOT
readonly VENV_PATH="${PROJECT_ROOT}/.venv"
readonly TEST_DB_PATH="${PROJECT_ROOT}/test_integration.db"
readonly LOG_FILE="${PROJECT_ROOT}/integration_validation.log"
||||
|
||||
# Logging helpers: colour-tagged output mirrored into $LOG_FILE via tee.
log() {
    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1" | tee -a "$LOG_FILE"
}

success() { echo -e "${GREEN}✅ $1${NC}" | tee -a "$LOG_FILE"; }

warning() { echo -e "${YELLOW}⚠️  $1${NC}" | tee -a "$LOG_FILE"; }

error() { echo -e "${RED}❌ $1${NC}" | tee -a "$LOG_FILE"; }
|
||||
|
||||
# Remove test artifacts and stop any server processes started by this script.
# Registered as the EXIT trap from main, so every step tolerates failure.
cleanup() {
    log "Cleaning up test resources..."

    # Stop any running servers (ignore "no process matched")
    pkill -f "wifi-densepose" || true
    pkill -f "uvicorn.*src.app" || true

    # Remove test database. rm -f already tolerates a missing file, so the
    # old `[ -f ... ] &&` guard was redundant and could leak a non-zero
    # status out of the trap.
    rm -f -- "$TEST_DB_PATH"

    # Remove test logs
    find "$PROJECT_ROOT" -name "*.log" -path "*/test*" -delete 2>/dev/null || true

    success "Cleanup completed"
}
|
||||
|
||||
# Ensure Python >= 3.9, a virtual environment, and project dependencies are
# available, creating/installing whatever is missing. Exits non-zero on a
# hard failure.
check_prerequisites() {
    log "Checking prerequisites..."

    # Check the interpreter version numerically instead of pattern-matching
    # the banner: the old regex (3\.(9|10|11|12)) wrongly rejected 3.13+.
    if ! python3 -c 'import sys; sys.exit(0 if sys.version_info >= (3, 9) else 1)'; then
        error "Python 3.9+ is required"
        exit 1
    fi
    success "Python version check passed"

    # Check if virtual environment exists
    if [ ! -d "$VENV_PATH" ]; then
        warning "Virtual environment not found, creating one..."
        python3 -m venv "$VENV_PATH"
    fi
    success "Virtual environment check passed"

    # Activate virtual environment
    source "$VENV_PATH/bin/activate"

    # Check if requirements are installed
    if ! pip list | grep -q "fastapi"; then
        warning "Dependencies not installed, installing..."
        pip install -e ".[dev]"
    fi
    success "Dependencies check passed"
}
|
||||
|
||||
# Verify that all required files and directories exist under $PROJECT_ROOT.
# Exits non-zero on the first missing entry.
validate_package_structure() {
    log "Validating package structure..."

    # Check main application files (local arrays: don't leak globals)
    local -a required_files=(
        "src/__init__.py"
        "src/main.py"
        "src/app.py"
        "src/config.py"
        "src/logger.py"
        "src/cli.py"
        "pyproject.toml"
        "setup.py"
        "MANIFEST.in"
    )

    local file
    for file in "${required_files[@]}"; do
        if [ ! -f "$PROJECT_ROOT/$file" ]; then
            error "Required file missing: $file"
            exit 1
        fi
    done
    success "Package structure validation passed"

    # Check directory structure
    local -a required_dirs=(
        "src/config"
        "src/core"
        "src/api"
        "src/services"
        "src/middleware"
        "src/database"
        "src/tasks"
        "src/commands"
        "tests/unit"
        "tests/integration"
    )

    local dir
    for dir in "${required_dirs[@]}"; do
        if [ ! -d "$PROJECT_ROOT/$dir" ]; then
            error "Required directory missing: $dir"
            exit 1
        fi
    done
    success "Directory structure validation passed"
}
|
||||
|
||||
# Confirm the main package and each core module can be imported inside the
# project virtual environment. Exits non-zero on the first import failure.
validate_imports() {
    log "Validating Python imports..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Test main package import
    python -c "import src; print(f'Package version: {src.__version__}')" || {
        error "Failed to import main package"
        exit 1
    }
    success "Main package import passed"

    # Test core components
    local -a core_modules=(
        "src.app"
        "src.config.settings"
        "src.logger"
        "src.cli"
        "src.core.csi_processor"
        "src.core.phase_sanitizer"
        "src.core.pose_estimator"
        "src.core.router_interface"
        "src.services.orchestrator"
        "src.database.connection"
        "src.database.models"
    )

    local module
    for module in "${core_modules[@]}"; do
        python -c "import $module" 2>/dev/null || {
            error "Failed to import module: $module"
            exit 1
        }
    done
    success "Core modules import passed"
}
|
||||
|
||||
# Validate that application settings load and expose the expected fields
# (environment, debug, api_version). Exits non-zero on failure.
validate_configuration() {
    log "Validating configuration..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Test configuration loading
    # NOTE: the multi-line string below is Python executed via `python -c`;
    # its lines must stay at column 0 (module level) to parse correctly.
    if ! python -c "
from src.config.settings import get_settings
settings = get_settings()
print(f'Environment: {settings.environment}')
print(f'Debug: {settings.debug}')
print(f'API Version: {settings.api_version}')
"; then
        error "Configuration validation failed"
        exit 1
    fi
    success "Configuration validation passed"
}
|
||||
|
||||
# Validate database integration: initialize the async DB manager against a
# throwaway SQLite file, test the connection, read stats, then close all
# connections. Exits non-zero on failure.
validate_database() {
    log "Validating database integration..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Test database connection and models
    # NOTE: the multi-line string below is Python executed via `python -c`;
    # the \" escapes are required because it sits inside shell double quotes.
    if ! python -c "
import asyncio
from src.config.settings import get_settings
from src.database.connection import get_database_manager

async def test_db():
    settings = get_settings()
    settings.database_url = 'sqlite+aiosqlite:///test_integration.db'

    db_manager = get_database_manager(settings)
    await db_manager.initialize()
    await db_manager.test_connection()

    # Test connection stats
    stats = await db_manager.get_connection_stats()
    print(f'Database connected: {stats[\"database\"][\"connected\"]}')

    await db_manager.close_all_connections()
    print('Database validation passed')

asyncio.run(test_db())
"; then
        error "Database validation failed"
        exit 1
    fi
    success "Database validation passed"
}
|
||||
|
||||
# Start the FastAPI app on a local port and smoke-test key endpoints.
# Exits non-zero if the server fails to start or any endpoint errors.
validate_api_endpoints() {
    log "Validating API endpoints..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Start server in background under the test environment
    export WIFI_DENSEPOSE_ENVIRONMENT=test
    export WIFI_DENSEPOSE_DATABASE_URL="sqlite+aiosqlite:///test_integration.db"

    python -m uvicorn src.app:app --host 127.0.0.1 --port 8888 --log-level error &
    SERVER_PID=$!

    # Poll the health endpoint instead of a single fixed sleep: slow
    # startups no longer cause false failures, fast ones don't waste time,
    # and a server that dies immediately is reported as such.
    local waited=0
    until curl -s -f "http://127.0.0.1:8888/health" > /dev/null 2>&1; do
        if ! kill -0 "$SERVER_PID" 2>/dev/null; then
            error "API server exited before becoming ready"
            exit 1
        fi
        if [ "$waited" -ge 30 ]; then
            error "API server did not become ready within 30s"
            kill "$SERVER_PID" 2>/dev/null || true
            exit 1
        fi
        sleep 1
        waited=$((waited + 1))
    done

    # Test endpoints
    local -a endpoints=(
        "http://127.0.0.1:8888/health"
        "http://127.0.0.1:8888/metrics"
        "http://127.0.0.1:8888/api/v1/devices"
        "http://127.0.0.1:8888/api/v1/sessions"
    )

    local endpoint
    for endpoint in "${endpoints[@]}"; do
        if ! curl -s -f "$endpoint" > /dev/null; then
            error "API endpoint failed: $endpoint"
            kill "$SERVER_PID" 2>/dev/null || true
            exit 1
        fi
    done

    # Stop server and reap it so no stray output follows
    kill "$SERVER_PID" 2>/dev/null || true
    wait "$SERVER_PID" 2>/dev/null || true

    success "API endpoints validation passed"
}
|
||||
|
||||
# Exercise the CLI entry points: --help, version, and config validation.
# Exits non-zero on the first failing command.
validate_cli() {
    log "Validating CLI interface..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Test CLI commands
    python -m src.cli --help > /dev/null || {
        error "CLI help command failed"
        exit 1
    }
    success "CLI help command passed"

    # Test version command
    python -m src.cli version > /dev/null || {
        error "CLI version command failed"
        exit 1
    }
    success "CLI version command passed"

    # Test config validation under the test environment
    export WIFI_DENSEPOSE_ENVIRONMENT=test
    export WIFI_DENSEPOSE_DATABASE_URL="sqlite+aiosqlite:///test_integration.db"

    python -m src.cli config validate > /dev/null || {
        error "CLI config validation failed"
        exit 1
    }
    success "CLI config validation passed"
}
|
||||
|
||||
# Validate the background task managers (cleanup, monitoring, backup): each
# must construct against throwaway SQLite settings and report its stats.
# Exits non-zero on failure.
validate_background_tasks() {
    log "Validating background tasks..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Test task managers
    # NOTE: the multi-line string below is Python executed via `python -c`;
    # the \" escapes are required because it sits inside shell double quotes.
    if ! python -c "
import asyncio
from src.config.settings import get_settings
from src.tasks.cleanup import get_cleanup_manager
from src.tasks.monitoring import get_monitoring_manager
from src.tasks.backup import get_backup_manager

async def test_tasks():
    settings = get_settings()
    settings.database_url = 'sqlite+aiosqlite:///test_integration.db'

    # Test cleanup manager
    cleanup_manager = get_cleanup_manager(settings)
    cleanup_stats = cleanup_manager.get_stats()
    print(f'Cleanup manager initialized: {\"manager\" in cleanup_stats}')

    # Test monitoring manager
    monitoring_manager = get_monitoring_manager(settings)
    monitoring_stats = monitoring_manager.get_stats()
    print(f'Monitoring manager initialized: {\"manager\" in monitoring_stats}')

    # Test backup manager
    backup_manager = get_backup_manager(settings)
    backup_stats = backup_manager.get_stats()
    print(f'Backup manager initialized: {\"manager\" in backup_stats}')

    print('Background tasks validation passed')

asyncio.run(test_tasks())
"; then
        error "Background tasks validation failed"
        exit 1
    fi
    success "Background tasks validation passed"
}
|
||||
|
||||
# Run the pytest integration suite against a throwaway SQLite database.
# Exits non-zero if any test fails.
run_integration_tests() {
    log "Running integration tests..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Set test environment
    export WIFI_DENSEPOSE_ENVIRONMENT=test
    export WIFI_DENSEPOSE_DATABASE_URL="sqlite+aiosqlite:///test_integration.db"

    # Run integration tests
    python -m pytest tests/integration/ -v --tb=short || {
        error "Integration tests failed"
        exit 1
    }
    success "Integration tests passed"
}
|
||||
|
||||
# Build the distribution artifacts, verify them with twine, then clean up.
# Exits non-zero on build or check failure.
validate_package_build() {
    log "Validating package build..."

    cd "$PROJECT_ROOT"
    source "$VENV_PATH/bin/activate"

    # Install build tools (quiet: pip's install chatter isn't part of the
    # validation report)
    pip install --quiet build twine

    # Build package
    if ! python -m build; then
        error "Package build failed"
        exit 1
    fi
    success "Package build passed"

    # Check package
    if ! python -m twine check dist/*; then
        error "Package check failed"
        exit 1
    fi
    success "Package check passed"

    # Clean up build artifacts; `--` guards against a glob expanding to
    # something that looks like an option
    rm -rf -- build/ dist/ ./*.egg-info/
}
|
||||
|
||||
# Write integration_report.md into $PROJECT_ROOT summarizing the run.
# Only called after every validation step has succeeded, hence the
# hard-coded PASSED status. The heredoc is expanded (unquoted EOF), so the
# $(...) substitutions run at report time; \` keeps backticks literal.
generate_report() {
    log "Generating integration report..."

    cat > "$PROJECT_ROOT/integration_report.md" << EOF
# WiFi-DensePose Integration Validation Report

**Date:** $(date)
**Status:** ✅ PASSED

## Validation Results

### Prerequisites
- ✅ Python version check
- ✅ Virtual environment setup
- ✅ Dependencies installation

### Package Structure
- ✅ Required files present
- ✅ Directory structure valid
- ✅ Python imports working

### Core Components
- ✅ Configuration management
- ✅ Database integration
- ✅ API endpoints
- ✅ CLI interface
- ✅ Background tasks

### Testing
- ✅ Integration tests passed
- ✅ Package build successful

## System Information

**Python Version:** $(python --version)
**Package Version:** $(python -c "import src; print(src.__version__)")
**Environment:** $(python -c "from src.config.settings import get_settings; print(get_settings().environment)")

## Next Steps

The WiFi-DensePose system has been successfully integrated and validated.
You can now:

1. Start the server: \`wifi-densepose start\`
2. Check status: \`wifi-densepose status\`
3. View configuration: \`wifi-densepose config show\`
4. Run tests: \`pytest tests/\`

For more information, see the documentation in the \`docs/\` directory.
EOF

    success "Integration report generated: integration_report.md"
}
|
||||
|
||||
# Orchestrate the full validation run. cleanup is registered on EXIT so test
# resources are removed on every exit path; `set -e` aborts the loop on the
# first failing step.
main() {
    log "Starting WiFi-DensePose integration validation..."

    # Trap cleanup on exit
    trap cleanup EXIT

    # Run validation steps in order; each exits non-zero on failure.
    local -a steps=(
        check_prerequisites
        validate_package_structure
        validate_imports
        validate_configuration
        validate_database
        validate_api_endpoints
        validate_cli
        validate_background_tasks
        run_integration_tests
        validate_package_build
        generate_report
    )

    local step
    for step in "${steps[@]}"; do
        "$step"
    done

    success "🎉 All integration validations passed!"
    log "Integration validation completed successfully"
}
|
||||
|
||||
# Entry point: forward all CLI arguments to main.
main "$@"
|
||||
Reference in New Issue
Block a user