Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,479 @@
#!/usr/bin/env node
/**
* Benchmark Runner for RuVector
*
* Orchestrates benchmark execution across multiple scenarios and regions
*/
import { execSync, spawn } from 'child_process';
import * as fs from 'fs';
import * as path from 'path';
import { SCENARIOS, Scenario, getScenarioGroup } from './benchmark-scenarios';
import { MetricsCollector, ComprehensiveMetrics, collectFromK6Output } from './metrics-collector';
import { ResultsAnalyzer, AnalysisReport } from './results-analyzer';
// Configuration
interface RunnerConfig {
outputDir: string;
k6Binary: string;
parallelScenarios: number;
enableHooks: boolean;
regions: string[];
baseUrl: string;
slack WebhookUrl?: string;
emailNotification?: string;
}
/** Mutable record tracking one scenario execution. */
interface TestRun {
  id: string; // unique run id: `${scenarioName}-${Date.now()}`
  scenario: Scenario; // scenario definition being executed
  status: 'pending' | 'running' | 'completed' | 'failed';
  startTime?: number; // epoch ms, set when execution begins
  endTime?: number; // epoch ms, set on completion or failure
  metrics?: ComprehensiveMetrics; // collected k6 metrics (completed runs only)
  analysis?: AnalysisReport; // analyzer output (completed runs only)
  error?: string; // failure message (failed runs only)
}
// Main runner class
/**
 * Orchestrates benchmark execution: generates a k6 script per scenario,
 * runs it, collects and analyzes metrics, writes all artifacts into a
 * per-invocation results directory, and sends optional notifications.
 */
export class BenchmarkRunner {
  private config: RunnerConfig;
  // Every run started by this instance, keyed by run id.
  private runs: Map<string, TestRun>;
  // Artifact directory for this invocation: <outputDir>/run-<timestamp>.
  private resultsDir: string;

  constructor(config: Partial<RunnerConfig> = {}) {
    this.config = {
      outputDir: config.outputDir || './results',
      k6Binary: config.k6Binary || 'k6',
      parallelScenarios: config.parallelScenarios || 1,
      enableHooks: config.enableHooks !== false, // hooks on unless explicitly disabled
      regions: config.regions || ['all'],
      baseUrl: config.baseUrl || 'http://localhost:8080',
      slackWebhookUrl: config.slackWebhookUrl,
      emailNotification: config.emailNotification,
    };
    this.runs = new Map();
    this.resultsDir = path.join(this.config.outputDir, `run-${Date.now()}`);
    // Create the output directory tree up front.
    if (!fs.existsSync(this.resultsDir)) {
      fs.mkdirSync(this.resultsDir, { recursive: true });
    }
  }

  /**
   * Run a single scenario end-to-end (hooks, k6, metrics, analysis, reports).
   * Execution failures are recorded on the returned TestRun rather than
   * rethrown; only an unknown scenario name throws.
   */
  async runScenario(scenarioName: string): Promise<TestRun> {
    const scenario = SCENARIOS[scenarioName];
    if (!scenario) {
      throw new Error(`Scenario not found: ${scenarioName}`);
    }
    const runId = `${scenarioName}-${Date.now()}`;
    const run: TestRun = {
      id: runId,
      scenario,
      status: 'pending',
    };
    this.runs.set(runId, run);
    try {
      console.log(`\n${'='.repeat(80)}`);
      console.log(`Starting scenario: ${scenario.name}`);
      console.log(`Description: ${scenario.description}`);
      console.log(`Expected duration: ${scenario.duration}`);
      console.log(`${'='.repeat(80)}\n`);

      // Optional shell hook before the test (e.g. warm-up, bookkeeping).
      if (this.config.enableHooks && scenario.preTestHook) {
        console.log('Executing pre-task hook...');
        execSync(scenario.preTestHook, { stdio: 'inherit' });
      }

      run.status = 'running';
      run.startTime = Date.now();

      // Generate the k6 script and execute it, streaming raw JSON output.
      const testFile = this.prepareTestFile(scenario);
      const outputFile = path.join(this.resultsDir, `${runId}-raw.json`);
      await this.executeK6(testFile, outputFile, scenario);

      // Collect and persist metrics from the raw k6 output.
      console.log('Collecting metrics...');
      const collector = collectFromK6Output(outputFile);
      const metrics = collector.generateReport(runId, scenarioName);
      const metricsFile = path.join(this.resultsDir, `${runId}-metrics.json`);
      collector.save(metricsFile, metrics);

      // Analyze results and persist analysis + markdown report.
      console.log('Analyzing results...');
      const analyzer = new ResultsAnalyzer(this.resultsDir);
      const analysis = analyzer.generateReport(metrics);
      const analysisFile = path.join(this.resultsDir, `${runId}-analysis.json`);
      analyzer.save(analysisFile, analysis);
      const markdown = analyzer.generateMarkdown(analysis);
      const markdownFile = path.join(this.resultsDir, `${runId}-report.md`);
      fs.writeFileSync(markdownFile, markdown);

      // Fixed: export the CSV into the results directory; previously a bare
      // filename was passed, dropping the CSV into the process CWD while all
      // other artifacts went to resultsDir.
      collector.exportCSV(path.join(this.resultsDir, `${runId}-metrics.csv`));

      run.status = 'completed';
      run.endTime = Date.now();
      run.metrics = metrics;
      run.analysis = analysis;

      // Optional shell hook after the test.
      if (this.config.enableHooks && scenario.postTestHook) {
        console.log('Executing post-task hook...');
        execSync(scenario.postTestHook, { stdio: 'inherit' });
      }

      await this.sendNotifications(run);

      console.log(`\n${'='.repeat(80)}`);
      console.log(`Scenario completed: ${scenario.name}`);
      console.log(`Status: ${run.status}`);
      console.log(`Duration: ${((run.endTime - run.startTime) / 1000 / 60).toFixed(2)} minutes`);
      console.log(`Overall Score: ${analysis.score.overall}/100`);
      console.log(`SLA Compliance: ${analysis.slaCompliance.met ? 'PASSED' : 'FAILED'}`);
      console.log(`${'='.repeat(80)}\n`);
    } catch (error) {
      run.status = 'failed';
      run.endTime = Date.now();
      run.error = error instanceof Error ? error.message : String(error);
      console.error(`\nScenario failed: ${scenario.name}`);
      console.error(`Error: ${run.error}\n`);
      await this.sendNotifications(run);
    }
    return run;
  }

  /**
   * Run several scenarios in batches of `parallelScenarios`, then write a
   * combined SUMMARY.md. Scenarios whose promise rejects (unknown name) are
   * logged and omitted from the result map.
   */
  async runScenarios(scenarioNames: string[]): Promise<Map<string, TestRun>> {
    console.log(`\nRunning ${scenarioNames.length} scenarios...`);
    console.log(`Parallel execution: ${this.config.parallelScenarios}`);
    console.log(`Output directory: ${this.resultsDir}\n`);
    const results = new Map<string, TestRun>();
    // Run scenarios in batches sized by parallelScenarios.
    for (let i = 0; i < scenarioNames.length; i += this.config.parallelScenarios) {
      const batch = scenarioNames.slice(i, i + this.config.parallelScenarios);
      console.log(`\nBatch ${Math.floor(i / this.config.parallelScenarios) + 1}/${Math.ceil(scenarioNames.length / this.config.parallelScenarios)}`);
      console.log(`Scenarios: ${batch.join(', ')}\n`);
      const promises = batch.map(name => this.runScenario(name));
      // allSettled: one bad scenario name must not abort the whole batch.
      const batchResults = await Promise.allSettled(promises);
      batchResults.forEach((result, index) => {
        const scenarioName = batch[index];
        if (result.status === 'fulfilled') {
          results.set(scenarioName, result.value);
        } else {
          console.error(`Failed to run scenario ${scenarioName}:`, result.reason);
        }
      });
    }
    this.generateSummaryReport(results);
    return results;
  }

  /**
   * Run a named scenario group (see SCENARIO_GROUPS).
   * @throws Error when the group is unknown or empty.
   */
  async runGroup(groupName: string): Promise<Map<string, TestRun>> {
    const scenarios = getScenarioGroup(groupName as any);
    if (scenarios.length === 0) {
      throw new Error(`Scenario group not found: ${groupName}`);
    }
    console.log(`\nRunning scenario group: ${groupName}`);
    console.log(`Scenarios: ${scenarios.join(', ')}\n`);
    return this.runScenarios(scenarios);
  }

  /**
   * Write a self-contained k6 script for the scenario into the results dir
   * and return its path. Scenario name/description and the base URL are
   * embedded via JSON.stringify so quotes in scenario metadata cannot break
   * the generated JavaScript (previously they were interpolated raw inside
   * single-quoted strings).
   */
  private prepareTestFile(scenario: Scenario): string {
    const testContent = `
import { check, sleep } from 'k6';
import http from 'k6/http';
import { Trend, Counter, Gauge, Rate } from 'k6/metrics';
// Import scenario configuration
const scenarioConfig = ${JSON.stringify(scenario.config, null, 2)};
const k6Options = ${JSON.stringify(scenario.k6Options, null, 2)};
// Export options
export const options = k6Options;
// Custom metrics
const queryLatency = new Trend('query_latency', true);
const errorRate = new Rate('error_rate');
const queriesPerSecond = new Counter('queries_per_second');
export default function() {
  const baseUrl = __ENV.BASE_URL || ${JSON.stringify(this.config.baseUrl)};
  const region = __ENV.REGION || 'unknown';
  const payload = JSON.stringify({
    query_id: \`query_\${Date.now()}_\${__VU}_\${__ITER}\`,
    vector: Array.from({ length: scenarioConfig.vectorDimension }, () => Math.random() * 2 - 1),
    top_k: 10,
  });
  const params = {
    headers: {
      'Content-Type': 'application/json',
      'X-Region': region,
      'X-VU': __VU.toString(),
    },
    tags: {
      scenario: ${JSON.stringify(scenario.name)},
      region: region,
    },
  };
  const startTime = Date.now();
  const response = http.post(\`\${baseUrl}/query\`, payload, params);
  const latency = Date.now() - startTime;
  queryLatency.add(latency);
  queriesPerSecond.add(1);
  const success = check(response, {
    'status is 200': (r) => r.status === 200,
    'has results': (r) => {
      try {
        const body = JSON.parse(r.body);
        return body.results && body.results.length > 0;
      } catch {
        return false;
      }
    },
    'latency acceptable': () => latency < 200,
  });
  errorRate.add(!success);
  sleep(parseFloat(scenarioConfig.queryInterval) / 1000);
}
export function setup() {
  console.log('Starting test:', ${JSON.stringify(scenario.name)});
  console.log('Description:', ${JSON.stringify(scenario.description)});
  return { startTime: Date.now() };
}
export function teardown(data) {
  const duration = Date.now() - data.startTime;
  console.log(\`Test completed in \${duration}ms\`);
}
`;
    // NOTE(review): scenario.name may contain spaces/parentheses, which end
    // up in the script filename — works on POSIX, but confirm acceptable.
    const testFile = path.join(this.resultsDir, `${scenario.name}-test.js`);
    fs.writeFileSync(testFile, testContent);
    return testFile;
  }

  /**
   * Spawn k6 against the generated script, streaming its stdio and writing
   * raw JSON output + a summary export next to it. Resolves on exit code 0,
   * rejects otherwise.
   */
  private async executeK6(testFile: string, outputFile: string, scenario: Scenario): Promise<void> {
    return new Promise((resolve, reject) => {
      const args = [
        'run',
        '--out', `json=${outputFile}`,
        '--summary-export', `${outputFile}.summary`,
        testFile,
      ];
      // Pass the target base URL through the environment.
      const env = {
        ...process.env,
        BASE_URL: this.config.baseUrl,
      };
      console.log(`Executing: ${this.config.k6Binary} ${args.join(' ')}\n`);
      const k6Process = spawn(this.config.k6Binary, args, {
        env,
        stdio: 'inherit',
      });
      k6Process.on('close', (code) => {
        if (code === 0) {
          resolve();
        } else {
          reject(new Error(`K6 exited with code ${code}`));
        }
      });
      // 'error' fires when the binary itself cannot be spawned.
      k6Process.on('error', (error) => {
        reject(error);
      });
    });
  }

  /**
   * Write SUMMARY.md: a per-scenario results table plus aggregated
   * recommendations sorted by how many scenarios mention them.
   */
  private generateSummaryReport(results: Map<string, TestRun>): void {
    let summary = `# Benchmark Summary Report\n\n`;
    summary += `**Date:** ${new Date().toISOString()}\n`;
    summary += `**Total Scenarios:** ${results.size}\n`;
    summary += `**Output Directory:** ${this.resultsDir}\n\n`;
    summary += `## Results\n\n`;
    summary += `| Scenario | Status | Duration | Score | SLA |\n`;
    summary += `|----------|--------|----------|-------|-----|\n`;
    for (const [name, run] of results) {
      const duration = run.endTime && run.startTime
        ? ((run.endTime - run.startTime) / 1000 / 60).toFixed(2) + 'm'
        : 'N/A';
      // Fixed: `??` instead of `||` so a legitimate score of 0 is not
      // rendered as 'N/A'.
      const score = run.analysis?.score.overall ?? 'N/A';
      const sla = run.analysis?.slaCompliance.met ? '✅' : '❌';
      summary += `| ${name} | ${run.status} | ${duration} | ${score} | ${sla} |\n`;
    }
    summary += `\n## Recommendations\n\n`;
    // Count how often each recommendation title appears across runs.
    const allRecommendations = new Map<string, number>();
    for (const run of results.values()) {
      if (run.analysis) {
        for (const rec of run.analysis.recommendations) {
          const key = rec.title;
          allRecommendations.set(key, (allRecommendations.get(key) || 0) + 1);
        }
      }
    }
    for (const [title, count] of Array.from(allRecommendations.entries()).sort((a, b) => b[1] - a[1])) {
      summary += `- ${title} (mentioned in ${count} scenarios)\n`;
    }
    const summaryFile = path.join(this.resultsDir, 'SUMMARY.md');
    fs.writeFileSync(summaryFile, summary);
    console.log(`\nSummary report generated: ${summaryFile}\n`);
  }

  /**
   * Best-effort notifications for a finished/failed run. Currently only
   * Slack is implemented (emailNotification is accepted in config but
   * unused); delivery failures are logged, never thrown.
   */
  private async sendNotifications(run: TestRun): Promise<void> {
    if (this.config.slackWebhookUrl) {
      try {
        const message = {
          text: `Benchmark ${run.status}: ${run.scenario.name}`,
          blocks: [
            {
              type: 'section',
              text: {
                type: 'mrkdwn',
                text: `*Benchmark ${run.status.toUpperCase()}*\n*Scenario:* ${run.scenario.name}\n*Status:* ${run.status}\n*Score:* ${run.analysis?.score.overall ?? 'N/A'}/100`,
              },
            },
          ],
        };
        await fetch(this.config.slackWebhookUrl, {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify(message),
        });
      } catch (error) {
        console.error('Failed to send Slack notification:', error);
      }
    }
  }
}
// CLI
// CLI entry point: parse argv and dispatch to the runner.
if (require.main === module) {
  const args = process.argv.slice(2);

  // No arguments: print usage and bail out.
  if (args.length === 0) {
    console.log(`
Usage: benchmark-runner.ts <command> [options]
Commands:
run <scenario> Run a single scenario
group <group> Run a scenario group
list List available scenarios
Examples:
benchmark-runner.ts run baseline_500m
benchmark-runner.ts group standard_suite
benchmark-runner.ts list
`);
    process.exit(1);
  }

  const [command, target] = args;
  const runner = new BenchmarkRunner({
    baseUrl: process.env.BASE_URL || 'http://localhost:8080',
    parallelScenarios: parseInt(process.env.PARALLEL || '1'),
  });

  // Async wrapper so top-level awaits are possible; any error exits non-zero.
  void (async () => {
    try {
      if (command === 'run') {
        if (target === undefined) {
          console.error('Error: Scenario name required');
          process.exit(1);
        }
        await runner.runScenario(target);
      } else if (command === 'group') {
        if (target === undefined) {
          console.error('Error: Group name required');
          process.exit(1);
        }
        await runner.runGroup(target);
      } else if (command === 'list') {
        console.log('\nAvailable scenarios:\n');
        for (const [name, scenario] of Object.entries(SCENARIOS)) {
          console.log(` ${name.padEnd(30)} - ${scenario.description}`);
        }
        console.log('\nAvailable groups:\n');
        console.log(' quick_validation');
        console.log(' standard_suite');
        console.log(' stress_suite');
        console.log(' reliability_suite');
        console.log(' full_suite\n');
      } else {
        console.error(`Unknown command: ${command}`);
        process.exit(1);
      }
    } catch (error) {
      console.error('Error:', error);
      process.exit(1);
    }
  })();
}
export default BenchmarkRunner;

View File

@@ -0,0 +1,650 @@
/**
* Benchmark Scenarios for RuVector
*
* Defines comprehensive test scenarios including baseline, burst, failover, and stress tests
*/
import { LoadConfig } from './load-generator';
/** A fully-specified benchmark scenario definition. */
export interface Scenario {
  name: string; // human-readable display name
  description: string;
  config: LoadConfig; // load-generator configuration
  k6Options: any; // raw k6 `options` object (scenarios, thresholds)
  // Target metrics the run is expected to meet.
  expectedMetrics: {
    p99Latency: number; // milliseconds
    errorRate: number; // percentage
    throughput: number; // queries per second
    availability: number; // percentage
  };
  preTestHook?: string; // shell command executed before the test
  postTestHook?: string; // shell command executed after the test
  regions?: string[]; // target regions; ['all'] or omitted = everywhere
  duration: string; // total wall-clock estimate, e.g. '3h15m'
  tags: string[]; // labels consumed by getScenariosByTag()
}
/**
 * Registry of all benchmark scenarios, keyed by scenario id.
 * Note: queryInterval / burstConfig durations are numeric strings in
 * milliseconds (parsed with parseFloat/parseInt by the generators).
 */
export const SCENARIOS: Record<string, Scenario> = {
  // ==================== BASELINE SCENARIOS ====================
  baseline_500m: {
    name: 'Baseline 500M Concurrent',
    description: 'Steady-state operation with 500M concurrent connections',
    config: {
      targetConnections: 500000000,
      rampUpDuration: '30m',
      steadyStateDuration: '2h',
      rampDownDuration: '15m',
      queriesPerConnection: 100,
      queryInterval: '1000',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'uniform',
    },
    k6Options: {
      scenarios: {
        baseline: {
          executor: 'ramping-vus',
          startVUs: 0,
          stages: [
            { duration: '30m', target: 500000 },
            { duration: '2h', target: 500000 },
            { duration: '15m', target: 0 },
          ],
          gracefulRampDown: '30s',
        },
      },
      thresholds: {
        'query_latency': ['p(99)<50'],
        'error_rate': ['rate<0.0001'],
      },
    },
    expectedMetrics: {
      p99Latency: 50,
      errorRate: 0.01,
      throughput: 50000000, // 50M queries/sec
      availability: 99.99,
    },
    preTestHook: 'npx claude-flow@alpha hooks pre-task --description "Baseline 500M concurrent test"',
    postTestHook: 'npx claude-flow@alpha hooks post-task --task-id "baseline_500m"',
    regions: ['all'],
    duration: '3h15m',
    tags: ['baseline', 'steady-state', 'production-simulation'],
  },
  baseline_100m: {
    name: 'Baseline 100M Concurrent',
    description: 'Smaller baseline for quick validation',
    config: {
      targetConnections: 100000000,
      rampUpDuration: '10m',
      steadyStateDuration: '30m',
      rampDownDuration: '5m',
      queriesPerConnection: 50,
      queryInterval: '1000',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'uniform',
    },
    k6Options: {
      scenarios: {
        baseline: {
          executor: 'ramping-vus',
          startVUs: 0,
          stages: [
            { duration: '10m', target: 100000 },
            { duration: '30m', target: 100000 },
            { duration: '5m', target: 0 },
          ],
        },
      },
    },
    expectedMetrics: {
      p99Latency: 50,
      errorRate: 0.01,
      throughput: 10000000,
      availability: 99.99,
    },
    duration: '45m',
    tags: ['baseline', 'quick-test'],
  },
  // ==================== BURST SCENARIOS ====================
  burst_10x: {
    name: 'Burst 10x (5B Concurrent)',
    description: 'Sudden spike to 5 billion concurrent connections',
    config: {
      targetConnections: 5000000000,
      rampUpDuration: '5m',
      steadyStateDuration: '10m',
      rampDownDuration: '5m',
      queriesPerConnection: 20,
      queryInterval: '500',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'burst',
      burstConfig: {
        multiplier: 10,
        duration: '300000', // 5 minutes
        frequency: '600000', // every 10 minutes
      },
    },
    k6Options: {
      scenarios: {
        burst: {
          executor: 'ramping-arrival-rate',
          startRate: 50000000,
          timeUnit: '1s',
          preAllocatedVUs: 500000,
          maxVUs: 5000000,
          stages: [
            { duration: '5m', target: 500000000 }, // 500M/sec
            { duration: '10m', target: 500000000 },
            { duration: '5m', target: 50000000 },
          ],
        },
      },
    },
    expectedMetrics: {
      p99Latency: 100,
      errorRate: 0.1,
      throughput: 500000000,
      availability: 99.9,
    },
    preTestHook: 'npx claude-flow@alpha hooks pre-task --description "Burst 10x test"',
    postTestHook: 'npx claude-flow@alpha hooks post-task --task-id "burst_10x"',
    duration: '20m',
    tags: ['burst', 'spike', 'stress-test'],
  },
  burst_25x: {
    name: 'Burst 25x (12.5B Concurrent)',
    description: 'Extreme spike to 12.5 billion concurrent connections',
    config: {
      targetConnections: 12500000000,
      rampUpDuration: '10m',
      steadyStateDuration: '15m',
      rampDownDuration: '10m',
      queriesPerConnection: 10,
      queryInterval: '500',
      protocol: 'http2',
      vectorDimension: 768,
      queryPattern: 'burst',
      burstConfig: {
        multiplier: 25,
        duration: '900000', // 15 minutes
        frequency: '1800000', // every 30 minutes
      },
    },
    k6Options: {
      scenarios: {
        extreme_burst: {
          executor: 'ramping-arrival-rate',
          startRate: 50000000,
          timeUnit: '1s',
          preAllocatedVUs: 1000000,
          maxVUs: 12500000,
          stages: [
            { duration: '10m', target: 1250000000 },
            { duration: '15m', target: 1250000000 },
            { duration: '10m', target: 50000000 },
          ],
        },
      },
    },
    expectedMetrics: {
      p99Latency: 150,
      errorRate: 0.5,
      throughput: 1250000000,
      availability: 99.5,
    },
    duration: '35m',
    tags: ['burst', 'extreme', 'stress-test'],
  },
  burst_50x: {
    name: 'Burst 50x (25B Concurrent)',
    description: 'Maximum spike to 25 billion concurrent connections',
    config: {
      targetConnections: 25000000000,
      rampUpDuration: '15m',
      steadyStateDuration: '20m',
      rampDownDuration: '15m',
      queriesPerConnection: 5,
      queryInterval: '500',
      protocol: 'http2',
      vectorDimension: 768,
      queryPattern: 'burst',
      burstConfig: {
        multiplier: 50,
        duration: '1200000', // 20 minutes
        frequency: '3600000', // every hour
      },
    },
    k6Options: {
      scenarios: {
        maximum_burst: {
          executor: 'ramping-arrival-rate',
          startRate: 50000000,
          timeUnit: '1s',
          preAllocatedVUs: 2000000,
          maxVUs: 25000000,
          stages: [
            { duration: '15m', target: 2500000000 },
            { duration: '20m', target: 2500000000 },
            { duration: '15m', target: 50000000 },
          ],
        },
      },
    },
    expectedMetrics: {
      p99Latency: 200,
      errorRate: 1.0,
      throughput: 2500000000,
      availability: 99.0,
    },
    duration: '50m',
    tags: ['burst', 'maximum', 'stress-test'],
  },
  // ==================== FAILOVER SCENARIOS ====================
  regional_failover: {
    name: 'Regional Failover',
    description: 'Test failover when a region goes down',
    config: {
      targetConnections: 500000000,
      rampUpDuration: '10m',
      steadyStateDuration: '30m',
      rampDownDuration: '5m',
      queriesPerConnection: 100,
      queryInterval: '1000',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'uniform',
    },
    k6Options: {
      scenarios: {
        normal_traffic: {
          executor: 'constant-vus',
          vus: 500000,
          duration: '45m',
        },
        // Simulate region failure at 15 minutes
        region_failure: {
          executor: 'shared-iterations',
          vus: 1,
          iterations: 1,
          startTime: '15m',
          exec: 'simulateRegionFailure',
        },
      },
      thresholds: {
        'query_latency': ['p(99)<100'], // Allow higher latency during failover
        'error_rate': ['rate<0.01'], // Allow some errors during failover
      },
    },
    expectedMetrics: {
      p99Latency: 100,
      errorRate: 1.0, // Some errors expected during failover
      throughput: 45000000, // ~10% degradation
      availability: 99.0,
    },
    duration: '45m',
    tags: ['failover', 'disaster-recovery', 'high-availability'],
  },
  multi_region_failover: {
    name: 'Multi-Region Failover',
    description: 'Test failover when multiple regions go down',
    config: {
      targetConnections: 500000000,
      rampUpDuration: '10m',
      steadyStateDuration: '40m',
      rampDownDuration: '5m',
      queriesPerConnection: 100,
      queryInterval: '1000',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'uniform',
    },
    k6Options: {
      scenarios: {
        normal_traffic: {
          executor: 'constant-vus',
          vus: 500000,
          duration: '55m',
        },
        first_region_failure: {
          executor: 'shared-iterations',
          vus: 1,
          iterations: 1,
          startTime: '15m',
          exec: 'simulateRegionFailure',
        },
        second_region_failure: {
          executor: 'shared-iterations',
          vus: 1,
          iterations: 1,
          startTime: '30m',
          exec: 'simulateRegionFailure',
        },
      },
    },
    expectedMetrics: {
      p99Latency: 150,
      errorRate: 2.0,
      throughput: 40000000,
      availability: 98.0,
    },
    duration: '55m',
    tags: ['failover', 'multi-region', 'disaster-recovery'],
  },
  // ==================== COLD START SCENARIOS ====================
  cold_start: {
    name: 'Cold Start',
    description: 'Test scaling from 0 to full capacity',
    config: {
      targetConnections: 500000000,
      rampUpDuration: '30m',
      steadyStateDuration: '30m',
      rampDownDuration: '10m',
      queriesPerConnection: 50,
      queryInterval: '1000',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'uniform',
    },
    k6Options: {
      scenarios: {
        cold_start: {
          executor: 'ramping-vus',
          startVUs: 0,
          stages: [
            { duration: '30m', target: 500000 },
            { duration: '30m', target: 500000 },
            { duration: '10m', target: 0 },
          ],
        },
      },
      thresholds: {
        'query_latency': ['p(99)<100'], // Allow higher latency during warm-up
      },
    },
    expectedMetrics: {
      p99Latency: 100,
      errorRate: 0.1,
      throughput: 48000000,
      availability: 99.9,
    },
    duration: '70m',
    tags: ['cold-start', 'scaling', 'initialization'],
  },
  // ==================== MIXED WORKLOAD SCENARIOS ====================
  read_heavy: {
    name: 'Read-Heavy Workload',
    description: '95% reads, 5% writes',
    config: {
      targetConnections: 500000000,
      rampUpDuration: '20m',
      steadyStateDuration: '1h',
      rampDownDuration: '10m',
      queriesPerConnection: 200,
      queryInterval: '500',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'hotspot',
    },
    k6Options: {
      scenarios: {
        reads: {
          executor: 'constant-vus',
          vus: 475000, // 95%
          duration: '1h30m',
          exec: 'readQuery',
        },
        writes: {
          executor: 'constant-vus',
          vus: 25000, // 5%
          duration: '1h30m',
          exec: 'writeQuery',
        },
      },
    },
    expectedMetrics: {
      p99Latency: 50,
      errorRate: 0.01,
      throughput: 50000000,
      availability: 99.99,
    },
    duration: '1h50m',
    tags: ['workload', 'read-heavy', 'production-simulation'],
  },
  write_heavy: {
    name: 'Write-Heavy Workload',
    description: '30% reads, 70% writes',
    config: {
      targetConnections: 500000000,
      rampUpDuration: '20m',
      steadyStateDuration: '1h',
      rampDownDuration: '10m',
      queriesPerConnection: 100,
      queryInterval: '1000',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'uniform',
    },
    k6Options: {
      scenarios: {
        reads: {
          executor: 'constant-vus',
          vus: 150000, // 30%
          duration: '1h30m',
          exec: 'readQuery',
        },
        writes: {
          executor: 'constant-vus',
          vus: 350000, // 70%
          duration: '1h30m',
          exec: 'writeQuery',
        },
      },
    },
    expectedMetrics: {
      p99Latency: 80,
      errorRate: 0.05,
      throughput: 45000000,
      availability: 99.95,
    },
    duration: '1h50m',
    tags: ['workload', 'write-heavy', 'stress-test'],
  },
  balanced_workload: {
    name: 'Balanced Workload',
    description: '50% reads, 50% writes',
    config: {
      targetConnections: 500000000,
      rampUpDuration: '20m',
      steadyStateDuration: '1h',
      rampDownDuration: '10m',
      queriesPerConnection: 150,
      queryInterval: '750',
      protocol: 'http',
      vectorDimension: 768,
      queryPattern: 'zipfian',
    },
    k6Options: {
      scenarios: {
        reads: {
          executor: 'constant-vus',
          vus: 250000,
          duration: '1h30m',
          exec: 'readQuery',
        },
        writes: {
          executor: 'constant-vus',
          vus: 250000,
          duration: '1h30m',
          exec: 'writeQuery',
        },
      },
    },
    expectedMetrics: {
      p99Latency: 60,
      errorRate: 0.02,
      throughput: 48000000,
      availability: 99.98,
    },
    duration: '1h50m',
    tags: ['workload', 'balanced', 'production-simulation'],
  },
  // ==================== REAL-WORLD SCENARIOS ====================
  world_cup: {
    name: 'World Cup Scenario',
    description: 'Predictable spike with geographic concentration',
    config: {
      targetConnections: 5000000000,
      rampUpDuration: '15m',
      steadyStateDuration: '2h',
      rampDownDuration: '30m',
      queriesPerConnection: 500,
      queryInterval: '200',
      protocol: 'ws',
      vectorDimension: 768,
      queryPattern: 'burst',
      burstConfig: {
        multiplier: 10,
        duration: '5400000', // 90 minutes (match duration)
        frequency: '7200000', // every 2 hours
      },
    },
    k6Options: {
      scenarios: {
        normal_traffic: {
          executor: 'constant-vus',
          vus: 500000,
          duration: '3h',
        },
        match_traffic: {
          executor: 'ramping-vus',
          startTime: '30m',
          startVUs: 500000,
          stages: [
            { duration: '15m', target: 5000000 }, // Match starts
            { duration: '90m', target: 5000000 }, // Match duration
            { duration: '15m', target: 500000 }, // Match ends
          ],
        },
      },
    },
    expectedMetrics: {
      p99Latency: 100,
      errorRate: 0.1,
      throughput: 500000000,
      availability: 99.9,
    },
    regions: ['europe-west1', 'europe-west2', 'europe-north1'], // Focus on Europe
    duration: '3h',
    tags: ['real-world', 'predictable-spike', 'geographic'],
  },
  black_friday: {
    name: 'Black Friday Scenario',
    description: 'Sustained high load with periodic spikes',
    config: {
      targetConnections: 2000000000,
      rampUpDuration: '1h',
      steadyStateDuration: '12h',
      rampDownDuration: '1h',
      queriesPerConnection: 1000,
      queryInterval: '100',
      protocol: 'http2',
      vectorDimension: 768,
      queryPattern: 'burst',
      burstConfig: {
        multiplier: 5,
        duration: '3600000', // 1 hour spikes
        frequency: '7200000', // every 2 hours
      },
    },
    k6Options: {
      scenarios: {
        baseline: {
          executor: 'constant-vus',
          vus: 2000000,
          duration: '14h',
        },
        hourly_spikes: {
          executor: 'ramping-vus',
          startVUs: 0,
          stages: [
            // Repeat spike pattern every 2 hours
            { duration: '1h', target: 10000000 },
            { duration: '1h', target: 0 },
          ],
        },
      },
    },
    expectedMetrics: {
      p99Latency: 80,
      errorRate: 0.05,
      throughput: 200000000,
      availability: 99.95,
    },
    duration: '14h',
    tags: ['real-world', 'sustained-high-load', 'retail'],
  },
};
// Scenario groups for batch testing
/** Named scenario bundles for batch runs (see BenchmarkRunner.runGroup). */
export const SCENARIO_GROUPS = {
  quick_validation: ['baseline_100m'], // fast smoke test (~45m)
  standard_suite: ['baseline_500m', 'burst_10x', 'read_heavy'],
  stress_suite: ['burst_25x', 'burst_50x', 'write_heavy'],
  reliability_suite: ['regional_failover', 'multi_region_failover', 'cold_start'],
  full_suite: Object.keys(SCENARIOS), // every scenario defined above
};
// Helper functions
/** Look up a scenario definition by key; undefined when absent. */
export function getScenario(name: string): Scenario | undefined {
  const entry = Object.entries(SCENARIOS).find(([key]) => key === name);
  return entry?.[1];
}
export function getScenariosByTag(tag: string): Scenario[] {
return Object.values(SCENARIOS).filter(s => s.tags.includes(tag));
}
/** Resolve a named group to its scenario keys; unknown groups yield []. */
export function getScenarioGroup(group: keyof typeof SCENARIO_GROUPS): string[] {
  const members = SCENARIO_GROUPS[group];
  return members ?? [];
}
/**
 * Rough dollar-cost estimate for running a scenario on GCP:
 * $0.10 per million queries plus a flat infrastructure charge.
 */
export function estimateCost(scenario: Scenario): number {
  // Total queries issued over the whole run.
  const issuedQueries =
    scenario.config.targetConnections * scenario.config.queriesPerConnection;
  const queryCost = 0.10 * (issuedQueries / 1000000);
  // Infrastructure estimated at $1000 for every hour of test time.
  const infrastructureCost = parseDuration(scenario.duration) * 1000;
  return queryCost + infrastructureCost;
}
/**
 * Convert a duration string such as '2h', '45m', or '3h15m' into hours.
 * Fixed: the original regex captured only the FIRST segment, so '3h15m'
 * parsed as 3h and '1h50m' as 1h, silently underestimating cost.
 * Strings with no h/m segments yield 0, as before.
 */
function parseDuration(duration: string): number {
  let hours = 0;
  for (const [, num, unit] of duration.matchAll(/(\d+)([hm])/g)) {
    hours += unit === 'h' ? parseInt(num, 10) : parseInt(num, 10) / 60;
  }
  return hours;
}
export default SCENARIOS;

View File

@@ -0,0 +1,437 @@
/**
* Distributed Load Generator for RuVector
*
* Generates load across multiple global regions with configurable patterns
* Supports WebSocket, HTTP/2, and gRPC protocols
*/
import * as k6 from 'k6';
import { check, sleep } from 'k6';
import http from 'k6/http';
import ws from 'k6/ws';
import { Trend, Counter, Gauge, Rate } from 'k6/metrics';
import { SharedArray } from 'k6/data';
import { exec } from 'k6/execution';
import * as crypto from 'k6/crypto';
// Custom k6 metrics emitted by the load generator.
const queryLatency = new Trend('query_latency', true); // per-query latency; second arg marks values as time (ms)
const connectionDuration = new Trend('connection_duration', true); // full connection lifetime (ms)
const errorRate = new Rate('error_rate'); // fraction of failed query checks
const activeConnections = new Gauge('active_connections'); // currently open WebSocket connections
const queriesPerSecond = new Counter('queries_per_second'); // total queries issued
const bytesTransferred = new Counter('bytes_transferred'); // request + response payload bytes
// GCP regions used to distribute generated load (see LoadConfig.region).
export const REGIONS = [
  'us-east1', 'us-west1', 'us-central1',
  'europe-west1', 'europe-west2', 'europe-north1',
  'asia-east1', 'asia-southeast1', 'asia-northeast1',
  'australia-southeast1', 'southamerica-east1'
];
// Load generation configuration
export interface LoadConfig {
  targetConnections: number; // total concurrent connections to simulate
  rampUpDuration: string; // e.g. '30m'
  steadyStateDuration: string;
  rampDownDuration: string;
  queriesPerConnection: number; // queries issued per connection lifetime
  queryInterval: string; // delay between queries in ms (numeric string, parseFloat'd)
  protocol: 'http' | 'ws' | 'http2' | 'grpc';
  region?: string; // originating GCP region (see REGIONS)
  vectorDimension: number; // dimensionality of generated query vectors
  queryPattern: 'uniform' | 'hotspot' | 'zipfian' | 'burst'; // key-access distribution
  // Burst traffic shape; required in practice when queryPattern === 'burst'.
  burstConfig?: {
    multiplier: number; // traffic multiplier during a burst window
    duration: string; // burst window length in ms (numeric string)
    frequency: string; // ms between burst window starts (numeric string)
  };
}
// Query patterns
/**
 * Generates query ids and vectors according to the configured access
 * pattern (uniform / hotspot / zipfian / burst) over a 1M-document keyspace.
 */
export class QueryPattern {
  private config: LoadConfig;
  // Pre-computed "hot" document ids used by hotspot and burst patterns.
  private hotspotIds: number[];

  constructor(config: LoadConfig) {
    this.config = config;
    this.hotspotIds = this.generateHotspots();
  }

  // Top 1% of the 1M-document keyspace (Pareto-style hot set).
  private generateHotspots(): number[] {
    const count = Math.ceil(1000000 * 0.01);
    return Array.from({ length: count }, (_, i) => i);
  }

  /** Produce a document id according to the configured access pattern. */
  generateQueryId(): string {
    switch (this.config.queryPattern) {
      case 'uniform':
        return this.uniformQuery();
      case 'hotspot':
        return this.hotspotQuery();
      case 'zipfian':
        return this.zipfianQuery();
      case 'burst':
        return this.burstQuery();
      default:
        return this.uniformQuery();
    }
  }

  private uniformQuery(): string {
    return `doc_${Math.floor(Math.random() * 1000000)}`;
  }

  private hotspotQuery(): string {
    // 80% chance to hit the hot set, else fall back to uniform.
    if (Math.random() < 0.8) {
      const idx = Math.floor(Math.random() * this.hotspotIds.length);
      return `doc_${this.hotspotIds[idx]}`;
    }
    return this.uniformQuery();
  }

  private zipfianQuery(): string {
    // Zipfian distribution: frequency ∝ 1/rank^s; rank clamped to keyspace.
    const s = 1.5;
    const rank = Math.floor(Math.pow(Math.random(), -1 / s));
    return `doc_${Math.min(rank, 999999)}`;
  }

  private burstQuery(): string {
    const burstConfig = this.config.burstConfig;
    // Fixed: burstConfig is optional on LoadConfig, but the original used a
    // non-null assertion and crashed at runtime when it was absent. Degrade
    // gracefully to uniform traffic instead.
    if (!burstConfig) {
      return this.uniformQuery();
    }
    const time = Date.now();
    const frequency = parseInt(burstConfig.frequency);
    // Inside a burst window when the wall clock is within `duration` ms of
    // the start of the current `frequency` period.
    const inBurst = (time % frequency) < parseInt(burstConfig.duration);
    if (inBurst) {
      // During burst, focus on hotspots.
      return this.hotspotQuery();
    }
    return this.uniformQuery();
  }

  /** Random query vector with components uniform in [-1, 1). */
  generateVector(): number[] {
    return Array.from(
      { length: this.config.vectorDimension },
      () => Math.random() * 2 - 1
    );
  }
}
// Connection manager
/**
 * Drives a single load-generating connection for the configured protocol.
 * Each connect() call issues `queriesPerConnection` queries and records
 * latency / error / throughput metrics via the module-level k6 metrics.
 */
export class ConnectionManager {
  private config: LoadConfig;
  private pattern: QueryPattern;
  private baseUrl: string;

  constructor(config: LoadConfig, baseUrl: string) {
    this.config = config;
    this.pattern = new QueryPattern(config);
    this.baseUrl = baseUrl;
  }

  /** Open a connection with the configured protocol and run its query loop. */
  async connect(): Promise<void> {
    const startTime = Date.now();
    switch (this.config.protocol) {
      case 'http':
        await this.httpConnection();
        break;
      case 'http2':
        await this.http2Connection();
        break;
      case 'ws':
        await this.websocketConnection();
        break;
      case 'grpc':
        await this.grpcConnection();
        break;
    }
    const duration = Date.now() - startTime;
    connectionDuration.add(duration);
  }

  // Issues queriesPerConnection sequential POST /query requests, recording
  // latency, throughput, byte counts, and check failures.
  private async httpConnection(): Promise<void> {
    const params = {
      headers: {
        'Content-Type': 'application/json',
        'X-Region': this.config.region || 'unknown',
        'X-Client-Id': exec.vu.idInTest.toString(),
      },
      tags: {
        protocol: 'http',
        region: this.config.region,
      },
    };
    for (let i = 0; i < this.config.queriesPerConnection; i++) {
      const startTime = Date.now();
      const queryId = this.pattern.generateQueryId();
      const vector = this.pattern.generateVector();
      const payload = JSON.stringify({
        query_id: queryId,
        vector: vector,
        top_k: 10,
        filter: {},
      });
      const response = http.post(`${this.baseUrl}/query`, payload, params);
      const latency = Date.now() - startTime;
      queryLatency.add(latency);
      queriesPerSecond.add(1);
      bytesTransferred.add(payload.length + (response.body?.length || 0));
      const success = check(response, {
        'status is 200': (r) => r.status === 200,
        'has results': (r) => {
          try {
            const body = JSON.parse(r.body as string);
            return body.results && body.results.length > 0;
          } catch {
            return false;
          }
        },
        'latency < 100ms': () => latency < 100,
      });
      errorRate.add(!success);
      if (!success) {
        console.error(`Query failed: ${response.status}, latency: ${latency}ms`);
      }
      // Pause between queries (queryInterval is ms; sleep takes seconds).
      sleep(parseFloat(this.config.queryInterval) / 1000);
    }
  }

  // HTTP/2 currently shares the HTTP/1.1 request loop; any protocol
  // negotiation happens at the k6 transport layer. (Removed a duplicate
  // `params` object here that was built but never used — requests are
  // actually tagged `protocol: 'http'` by httpConnection.)
  private async http2Connection(): Promise<void> {
    await this.httpConnection();
  }

  // Opens a WebSocket, authenticates, fires all queries, and records
  // latency from the echoed client_timestamp on each query_result.
  private async websocketConnection(): Promise<void> {
    const url = this.baseUrl.replace('http', 'ws') + '/ws';
    const params = {
      tags: {
        protocol: 'websocket',
        region: this.config.region,
      },
    };
    ws.connect(url, params, (socket) => {
      socket.on('open', () => {
        activeConnections.add(1);
        // Send authentication
        socket.send(JSON.stringify({
          type: 'auth',
          token: 'benchmark-token',
          region: this.config.region,
        }));
      });
      socket.on('message', (data) => {
        try {
          const msg = JSON.parse(data as string);
          if (msg.type === 'query_result') {
            const latency = Date.now() - msg.client_timestamp;
            queryLatency.add(latency);
            queriesPerSecond.add(1);
            const success = msg.results && msg.results.length > 0;
            errorRate.add(!success);
          }
        } catch (e) {
          errorRate.add(1);
        }
      });
      socket.on('error', (e) => {
        console.error('WebSocket error:', e);
        errorRate.add(1);
      });
      socket.on('close', () => {
        activeConnections.add(-1);
      });
      // NOTE(review): setTimeout with an empty callback does not pace this
      // loop — all queries are sent back-to-back. Confirm whether per-query
      // pacing (like the HTTP path's sleep) was intended here.
      for (let i = 0; i < this.config.queriesPerConnection; i++) {
        const queryId = this.pattern.generateQueryId();
        const vector = this.pattern.generateVector();
        socket.send(JSON.stringify({
          type: 'query',
          query_id: queryId,
          vector: vector,
          top_k: 10,
          client_timestamp: Date.now(),
        }));
        socket.setTimeout(() => {}, parseFloat(this.config.queryInterval));
      }
      // Close the socket once the nominal query window has elapsed.
      socket.setTimeout(() => {
        socket.close();
      }, parseFloat(this.config.queryInterval) * this.config.queriesPerConnection);
    });
  }

  // gRPC implementation using k6/net/grpc.
  // TODO: Implement when gRPC is available.
  private async grpcConnection(): Promise<void> {
    console.log('gRPC not yet implemented, falling back to HTTP/2');
    await this.http2Connection();
  }
}
// Multi-region orchestrator
/**
 * Distributes k6 VUs across a set of registered regions (round-robin by
 * VU id) and runs one ConnectionManager against the assigned region.
 */
export class MultiRegionOrchestrator {
  private readonly configs = new Map<string, LoadConfig>();
  private readonly baseUrls = new Map<string, string>();

  /** Register a region with its load configuration and endpoint base URL. */
  addRegion(region: string, config: LoadConfig, baseUrl: string): void {
    const regionConfig: LoadConfig = { ...config, region };
    this.configs.set(region, regionConfig);
    this.baseUrls.set(region, baseUrl);
  }

  /**
   * Pick this VU's region (vuId modulo number of registered regions),
   * log the assignment, and drive a connection against that region.
   */
  async run(): Promise<void> {
    const vuId = exec.vu.idInTest;
    const regionNames = Array.from(this.configs.keys());
    const region = regionNames[vuId % this.configs.size];
    const config = this.configs.get(region)!;
    const baseUrl = this.baseUrls.get(region)!;
    console.log(`VU ${vuId} assigned to region: ${region}`);
    const manager = new ConnectionManager(config, baseUrl);
    await manager.connect();
  }
}
// K6 test configuration
export const options = {
  scenarios: {
    // NOTE(review): stage targets are 500,000 VUs, not "500M" as the old
    // comments claimed — 500M matches `targetConnections` in the default
    // function; confirm whether each VU is meant to multiplex connections.
    baseline_500m: {
      executor: 'ramping-vus',
      startVUs: 0,
      stages: [
        { duration: '30m', target: 500000 }, // Ramp to 500k VUs
        { duration: '2h', target: 500000 }, // Hold at 500k VUs
        { duration: '15m', target: 0 }, // Ramp down
      ],
      gracefulRampDown: '30s',
    },
    burst_10x: {
      executor: 'ramping-vus',
      startTime: '3h', // starts after the baseline scenario's 2h45m window
      startVUs: 500000,
      stages: [
        { duration: '5m', target: 5000000 }, // Spike to 5M VUs (10x baseline)
        { duration: '10m', target: 5000000 }, // Hold
        { duration: '5m', target: 500000 }, // Return to baseline
      ],
      gracefulRampDown: '30s',
    },
  },
  thresholds: {
    // Latency SLAs in milliseconds; error_rate enforces 99.99% success.
    'query_latency': ['p(95)<50', 'p(99)<100'],
    'error_rate': ['rate<0.0001'], // 99.99% success
    'http_req_duration': ['p(95)<50', 'p(99)<100'],
  },
  tags: {
    test_type: 'distributed_load',
    version: '1.0.0',
  },
};
// Main test function (one iteration per VU)
export default async function () {
  // NOTE(review): k6 finalizes options before the test starts; assigning to
  // exec.test.options at runtime is not a documented way to set a timeout.
  // Kept to preserve existing behavior — prefer setting setupTimeout in the
  // exported `options` object instead.
  exec.test.options.setupTimeout = '10m';
  // Per-VU load configuration.
  const config: LoadConfig = {
    targetConnections: 500000000, // 500M
    rampUpDuration: '30m',
    steadyStateDuration: '2h',
    rampDownDuration: '15m',
    queriesPerConnection: 100,
    queryInterval: '1000', // 1 second between queries
    protocol: 'http',
    vectorDimension: 768, // Default embedding size
    queryPattern: 'uniform',
  };
  // Get region from environment or assign based on VU
  const region = __ENV.REGION || REGIONS[exec.vu.idInTest % REGIONS.length];
  const baseUrl = __ENV.BASE_URL || 'http://localhost:8080';
  config.region = region;
  const manager = new ConnectionManager(config, baseUrl);
  // Fix: connect() is async — the promise was previously dropped, so the
  // iteration could end while queries were still in flight. Await it.
  await manager.connect();
}
// Setup function (runs once before test)
/**
 * Logs the test plan; the returned object is passed by k6 to the default
 * function and to teardown().
 */
export function setup() {
  console.log('Starting distributed load test...');
  console.log(`Target: ${options.scenarios.baseline_500m.stages[1].target} concurrent connections`);
  console.log(`Regions: ${REGIONS.join(', ')}`);
  // Fix: removed the unused `hookResult` local — reading
  // `exec.test.options.exec` had no effect and its value was never used.
  console.log('Pre-task hook executed');
  return {
    startTime: Date.now(),
    regions: REGIONS,
  };
}
// Teardown function (runs once after test)
/**
 * Logs total test duration.
 * @param data The object returned by setup() — typed instead of `any`.
 */
export function teardown(data: { startTime: number; regions: string[] }) {
  const duration = Date.now() - data.startTime;
  console.log(`Test completed in ${duration}ms`);
  console.log('Post-task hook executed');
}
// Export for external use
// Fix(review): ConnectionManager and MultiRegionOrchestrator are already
// declared with `export class` above; re-exporting them here is a duplicate
// export (a compile error), so they are omitted from this list. Verify that
// LoadConfig and QueryPattern are not also exported at their declarations;
// if LoadConfig is an interface, prefer `export type { LoadConfig }`.
export {
  LoadConfig,
  QueryPattern,
};

View File

@@ -0,0 +1,575 @@
/**
* Metrics Collector for RuVector Benchmarks
*
* Collects, aggregates, and stores comprehensive performance metrics
*/
import * as fs from 'fs';
import * as path from 'path';
// Metric types
/** Latency distribution summary; all values in milliseconds. */
export interface LatencyMetrics {
  min: number;
  max: number;
  mean: number;
  median: number;
  p50: number; // same value as median (both are the 50th percentile)
  p90: number;
  p95: number;
  p99: number;
  p99_9: number; // 99.9th percentile
  stddev: number;
}
/** Aggregate throughput over the collection window. */
export interface ThroughputMetrics {
  queriesPerSecond: number; // total queries / elapsed collection time
  bytesPerSecond: number;
  connectionsPerSecond: number;
  peakQPS: number; // highest single recorded sample
  averageQPS: number; // mean of recorded samples
}
/** Error counts and rates, broken down by type and region. */
export interface ErrorMetrics {
  totalErrors: number;
  errorRate: number; // percentage of total requests (0-100)
  errorsByType: Record<string, number>;
  errorsByRegion: Record<string, number>;
  timeouts: number; // count of errors tagged type 'timeout'
  connectionErrors: number; // tagged 'connection'
  serverErrors: number; // tagged 'server'
  clientErrors: number; // tagged 'client'
}
/**
 * Host resource utilization. CPU/memory values appear to be percentages
 * (consumers compare them against 80-90) — confirm units with collectors.
 */
export interface ResourceMetrics {
  cpu: {
    average: number;
    peak: number;
    perRegion: Record<string, number>;
  };
  memory: {
    average: number;
    peak: number;
    perRegion: Record<string, number>;
  };
  network: {
    ingressBytes: number;
    egressBytes: number;
    bandwidth: number;
    perRegion: Record<string, number>;
  };
  disk: {
    reads: number;
    writes: number;
    iops: number;
  };
}
/** Estimated infrastructure cost breakdown for the run. */
export interface CostMetrics {
  computeCost: number;
  networkCost: number;
  storageCost: number;
  totalCost: number;
  costPerMillionQueries: number;
  costPerRegion: Record<string, number>;
}
/** Auto-scaling behavior observed during the run. */
export interface ScalingMetrics {
  timeToTarget: number; // milliseconds to reach target capacity
  scaleUpRate: number; // connections/second
  scaleDownRate: number; // connections/second
  autoScaleEvents: number;
  coldStartLatency: number;
}
/** Uptime/incident summary for the run. */
export interface AvailabilityMetrics {
  uptime: number; // percentage
  downtime: number; // milliseconds
  mtbf: number; // mean time between failures
  mttr: number; // mean time to recovery
  incidents: Array<{
    timestamp: number;
    duration: number;
    impact: string;
    region?: string;
  }>;
}
/** Per-region slice of the headline metrics. */
export interface RegionalMetrics {
  region: string;
  latency: LatencyMetrics;
  throughput: ThroughputMetrics;
  errors: ErrorMetrics;
  activeConnections: number;
  availability: number;
}
/** Full report produced by MetricsCollector.generateReport(). */
export interface ComprehensiveMetrics {
  testId: string;
  scenario: string;
  startTime: number;
  endTime: number;
  duration: number; // endTime - startTime, in milliseconds
  latency: LatencyMetrics;
  throughput: ThroughputMetrics;
  errors: ErrorMetrics;
  resources: ResourceMetrics;
  costs: CostMetrics;
  scaling: ScalingMetrics;
  availability: AvailabilityMetrics;
  regional: RegionalMetrics[];
  slaCompliance: {
    latencySLA: boolean; // p99 < 50ms
    availabilitySLA: boolean; // 99.99%
    errorRateSLA: boolean; // < 0.01%
  };
  tags: string[];
  metadata: Record<string, any>;
}
// Time series data point
/** One timestamped sample, optionally tagged (e.g. region, error type). */
export interface DataPoint {
  timestamp: number;
  value: number;
  tags?: Record<string, string>;
}
/** A named metric with its ordered samples. */
export interface TimeSeries {
  metric: string;
  dataPoints: DataPoint[];
}
// Metrics collector class
/**
 * In-memory time-series store for benchmark metrics, with aggregation into
 * a ComprehensiveMetrics report plus JSON/CSV persistence.
 */
export class MetricsCollector {
  // Time-series store, keyed by metric name.
  private metrics: Map<string, TimeSeries>;
  // Collection start (ms epoch); denominator for rate calculations.
  private startTime: number;
  // Directory where reports and CSV exports are written.
  private outputDir: string;

  constructor(outputDir: string = './results') {
    this.metrics = new Map();
    this.startTime = Date.now();
    this.outputDir = outputDir;
    // Ensure output directory exists
    if (!fs.existsSync(outputDir)) {
      fs.mkdirSync(outputDir, { recursive: true });
    }
  }

  /** Record a single data point for `metric`, timestamped at call time. */
  record(metric: string, value: number, tags?: Record<string, string>): void {
    let series = this.metrics.get(metric);
    if (!series) {
      series = { metric, dataPoints: [] };
      this.metrics.set(metric, series);
    }
    series.dataPoints.push({ timestamp: Date.now(), value, tags });
  }

  /** Record a query latency sample (ms), tagged by region. */
  recordLatency(latency: number, region?: string): void {
    this.record('latency', latency, { region: region || 'unknown' });
  }

  /** Record a throughput sample (query count), tagged by region. */
  recordThroughput(qps: number, region?: string): void {
    this.record('throughput', qps, { region: region || 'unknown' });
  }

  /** Record one error occurrence, tagged by type and region. */
  recordError(errorType: string, region?: string): void {
    this.record('errors', 1, { type: errorType, region: region || 'unknown' });
  }

  /** Record a resource-usage sample (e.g. 'cpu', 'memory'), tagged by region. */
  recordResource(resource: string, usage: number, region?: string): void {
    this.record(`resource_${resource}`, usage, { region: region || 'unknown' });
  }

  /**
   * Compute distribution statistics from raw latency samples.
   * Fix: returns an all-zero result for an empty sample set instead of
   * NaN (mean/stddev) and undefined (min/max/percentiles).
   */
  calculateLatencyMetrics(data: number[]): LatencyMetrics {
    if (data.length === 0) {
      return {
        min: 0, max: 0, mean: 0, median: 0,
        p50: 0, p90: 0, p95: 0, p99: 0, p99_9: 0,
        stddev: 0,
      };
    }
    const sorted = [...data].sort((a, b) => a - b);
    const len = sorted.length;
    // Nearest-rank percentile on the sorted samples.
    const percentile = (p: number) => {
      const index = Math.ceil(len * p) - 1;
      return sorted[Math.max(0, index)];
    };
    const mean = data.reduce((a, b) => a + b, 0) / len;
    // Population variance (divides by N, not N-1).
    const variance = data.reduce((a, b) => a + Math.pow(b - mean, 2), 0) / len;
    const stddev = Math.sqrt(variance);
    return {
      min: sorted[0],
      max: sorted[len - 1],
      mean,
      median: percentile(0.5),
      p50: percentile(0.5),
      p90: percentile(0.9),
      p95: percentile(0.95),
      p99: percentile(0.99),
      p99_9: percentile(0.999),
      stddev,
    };
  }

  /**
   * Aggregate throughput from recorded 'throughput' samples.
   * queriesPerSecond = total recorded queries / elapsed collection time.
   */
  calculateThroughputMetrics(): ThroughputMetrics {
    const throughputSeries = this.metrics.get('throughput');
    if (!throughputSeries || throughputSeries.dataPoints.length === 0) {
      return {
        queriesPerSecond: 0,
        bytesPerSecond: 0,
        connectionsPerSecond: 0,
        peakQPS: 0,
        averageQPS: 0,
      };
    }
    const qpsValues = throughputSeries.dataPoints.map(dp => dp.value);
    const totalQueries = qpsValues.reduce((a, b) => a + b, 0);
    const duration = (Date.now() - this.startTime) / 1000; // seconds
    return {
      // Fix: guard — elapsed time can be 0ms when queried right after start.
      queriesPerSecond: duration > 0 ? totalQueries / duration : 0,
      bytesPerSecond: 0, // TODO: Calculate from data
      connectionsPerSecond: 0, // TODO: Calculate from data
      peakQPS: Math.max(...qpsValues),
      averageQPS: totalQueries / qpsValues.length,
    };
  }

  /**
   * Aggregate recorded 'errors' samples into counts by type and region.
   * errorRate is a percentage of total recorded requests (0 when none).
   */
  calculateErrorMetrics(): ErrorMetrics {
    const errorSeries = this.metrics.get('errors');
    if (!errorSeries || errorSeries.dataPoints.length === 0) {
      return {
        totalErrors: 0,
        errorRate: 0,
        errorsByType: {},
        errorsByRegion: {},
        timeouts: 0,
        connectionErrors: 0,
        serverErrors: 0,
        clientErrors: 0,
      };
    }
    const errorsByType: Record<string, number> = {};
    const errorsByRegion: Record<string, number> = {};
    for (const dp of errorSeries.dataPoints) {
      const type = dp.tags?.type || 'unknown';
      const region = dp.tags?.region || 'unknown';
      errorsByType[type] = (errorsByType[type] || 0) + 1;
      errorsByRegion[region] = (errorsByRegion[region] || 0) + 1;
    }
    const totalErrors = errorSeries.dataPoints.length;
    const totalRequests = this.getTotalRequests();
    return {
      totalErrors,
      errorRate: totalRequests > 0 ? (totalErrors / totalRequests) * 100 : 0,
      errorsByType,
      errorsByRegion,
      timeouts: errorsByType['timeout'] || 0,
      connectionErrors: errorsByType['connection'] || 0,
      serverErrors: errorsByType['server'] || 0,
      clientErrors: errorsByType['client'] || 0,
    };
  }

  /**
   * Aggregate resource-usage series (cpu/memory/network). Network byte and
   * disk counters are not collected yet and report 0.
   */
  calculateResourceMetrics(): ResourceMetrics {
    const cpuSeries = this.metrics.get('resource_cpu');
    const memorySeries = this.metrics.get('resource_memory');
    const networkSeries = this.metrics.get('resource_network');
    const cpu = {
      average: this.average(cpuSeries?.dataPoints.map(dp => dp.value) || []),
      peak: Math.max(...(cpuSeries?.dataPoints.map(dp => dp.value) || [0])),
      perRegion: this.aggregateByRegion(cpuSeries),
    };
    const memory = {
      average: this.average(memorySeries?.dataPoints.map(dp => dp.value) || []),
      peak: Math.max(...(memorySeries?.dataPoints.map(dp => dp.value) || [0])),
      perRegion: this.aggregateByRegion(memorySeries),
    };
    const network = {
      ingressBytes: 0, // TODO: Calculate
      egressBytes: 0, // TODO: Calculate
      bandwidth: 0, // TODO: Calculate
      perRegion: this.aggregateByRegion(networkSeries),
    };
    return {
      cpu,
      memory,
      network,
      disk: {
        reads: 0,
        writes: 0,
        iops: 0,
      },
    };
  }

  /**
   * Estimate run cost from resource usage and throughput. Pricing constants
   * are rough GCP estimates (2024); treat results as indicative only.
   * @param duration Run duration in milliseconds.
   */
  calculateCostMetrics(duration: number): CostMetrics {
    const resources = this.calculateResourceMetrics();
    const throughput = this.calculateThroughputMetrics();
    // GCP pricing estimates (as of 2024)
    const computeCostPerHour = 0.50; // per vCPU-hour
    const networkCostPerGB = 0.12;
    const storageCostPerGB = 0.02; // unused until storageCost is implemented
    const durationHours = duration / (1000 * 60 * 60);
    const computeCost = resources.cpu.average * computeCostPerHour * durationHours;
    const networkCost = (resources.network.ingressBytes + resources.network.egressBytes) / (1024 * 1024 * 1024) * networkCostPerGB;
    const storageCost = 0; // TODO: Calculate based on storage usage
    const totalCost = computeCost + networkCost + storageCost;
    const totalQueries = throughput.queriesPerSecond * (duration / 1000);
    // Fix: guard against NaN/Infinity when no queries were recorded.
    const costPerMillionQueries = totalQueries > 0 ? (totalCost / totalQueries) * 1000000 : 0;
    return {
      computeCost,
      networkCost,
      storageCost,
      totalCost,
      costPerMillionQueries,
      costPerRegion: {}, // TODO: Calculate per-region costs
    };
  }

  /** Scaling events are not collected yet; all fields report 0. */
  calculateScalingMetrics(): ScalingMetrics {
    // TODO: Implement based on collected scaling events
    return {
      timeToTarget: 0,
      scaleUpRate: 0,
      scaleDownRate: 0,
      autoScaleEvents: 0,
      coldStartLatency: 0,
    };
  }

  /**
   * Compute uptime/downtime. Incident tracking is not implemented, so
   * downtime is always 0 and uptime 100% for any positive duration.
   * Fix: a zero-length run no longer yields NaN uptime, and the previously
   * unused `errors` local was removed.
   * @param duration Run duration in milliseconds.
   */
  calculateAvailabilityMetrics(duration: number): AvailabilityMetrics {
    const downtime = 0; // TODO: Calculate from incident data
    return {
      uptime: duration > 0 ? ((duration - downtime) / duration) * 100 : 100,
      downtime,
      mtbf: 0, // TODO: Calculate
      mttr: 0, // TODO: Calculate
      incidents: [], // TODO: Collect incidents
    };
  }

  /** Build one RegionalMetrics entry per region seen in any recorded tags. */
  calculateRegionalMetrics(): RegionalMetrics[] {
    const regions = this.getRegions();
    const metrics: RegionalMetrics[] = [];
    for (const region of regions) {
      const latencyData = this.getMetricsByRegion('latency', region);
      const throughputData = this.getMetricsByRegion('throughput', region);
      const errorData = this.getMetricsByRegion('errors', region);
      metrics.push({
        region,
        latency: this.calculateLatencyMetrics(latencyData),
        throughput: {
          queriesPerSecond: this.average(throughputData),
          bytesPerSecond: 0,
          connectionsPerSecond: 0,
          peakQPS: Math.max(...throughputData, 0),
          averageQPS: this.average(throughputData),
        },
        errors: {
          totalErrors: errorData.length,
          errorRate: 0, // TODO: Calculate
          errorsByType: {},
          errorsByRegion: {},
          timeouts: 0,
          connectionErrors: 0,
          serverErrors: 0,
          clientErrors: 0,
        },
        activeConnections: 0, // TODO: Track
        availability: 99.99, // TODO: Calculate
      });
    }
    return metrics;
  }

  /**
   * Assemble the full ComprehensiveMetrics report from everything recorded
   * since construction. SLA flags: p99 < 50ms, uptime >= 99.99%,
   * error rate < 0.01%.
   */
  generateReport(testId: string, scenario: string): ComprehensiveMetrics {
    const endTime = Date.now();
    const duration = endTime - this.startTime;
    const latencySeries = this.metrics.get('latency');
    const latencyData = latencySeries?.dataPoints.map(dp => dp.value) || [];
    const latency = this.calculateLatencyMetrics(latencyData);
    const throughput = this.calculateThroughputMetrics();
    const errors = this.calculateErrorMetrics();
    const resources = this.calculateResourceMetrics();
    const costs = this.calculateCostMetrics(duration);
    const scaling = this.calculateScalingMetrics();
    const availability = this.calculateAvailabilityMetrics(duration);
    const regional = this.calculateRegionalMetrics();
    const slaCompliance = {
      latencySLA: latency.p99 < 50,
      availabilitySLA: availability.uptime >= 99.99,
      errorRateSLA: errors.errorRate < 0.01,
    };
    return {
      testId,
      scenario,
      startTime: this.startTime,
      endTime,
      duration,
      latency,
      throughput,
      errors,
      resources,
      costs,
      scaling,
      availability,
      regional,
      slaCompliance,
      tags: [],
      metadata: {},
    };
  }

  /** Write a metrics report as pretty-printed JSON into the output directory. */
  save(filename: string, metrics: ComprehensiveMetrics): void {
    const filepath = path.join(this.outputDir, filename);
    fs.writeFileSync(filepath, JSON.stringify(metrics, null, 2));
    console.log(`Metrics saved to ${filepath}`);
  }

  /** Dump every raw data point as CSV (timestamp, metric, value, region). */
  exportCSV(filename: string): void {
    const filepath = path.join(this.outputDir, filename);
    const headers = ['timestamp', 'metric', 'value', 'region'];
    const rows = [headers.join(',')];
    for (const [metric, series] of this.metrics) {
      for (const dp of series.dataPoints) {
        const row = [
          dp.timestamp,
          metric,
          dp.value,
          dp.tags?.region || 'unknown',
        ];
        rows.push(row.join(','));
      }
    }
    fs.writeFileSync(filepath, rows.join('\n'));
    console.log(`CSV exported to ${filepath}`);
  }

  // Helper methods
  /** Total requests = sum of all recorded 'throughput' sample values. */
  private getTotalRequests(): number {
    const throughputSeries = this.metrics.get('throughput');
    if (!throughputSeries) return 0;
    return throughputSeries.dataPoints.reduce((sum, dp) => sum + dp.value, 0);
  }

  /** Arithmetic mean; 0 for an empty list. */
  private average(values: number[]): number {
    if (values.length === 0) return 0;
    return values.reduce((a, b) => a + b, 0) / values.length;
  }

  /** Sum of sample values per region tag ('unknown' when untagged). */
  private aggregateByRegion(series?: TimeSeries): Record<string, number> {
    const result: Record<string, number> = {};
    if (!series) return result;
    for (const dp of series.dataPoints) {
      const region = dp.tags?.region || 'unknown';
      if (!result[region]) result[region] = 0;
      result[region] += dp.value;
    }
    return result;
  }

  /** Distinct region tags seen across all series. */
  private getRegions(): string[] {
    const regions = new Set<string>();
    for (const series of this.metrics.values()) {
      for (const dp of series.dataPoints) {
        if (dp.tags?.region) {
          regions.add(dp.tags.region);
        }
      }
    }
    return Array.from(regions);
  }

  /** Sample values of `metric` restricted to one region tag. */
  private getMetricsByRegion(metric: string, region: string): number[] {
    const series = this.metrics.get(metric);
    if (!series) return [];
    return series.dataPoints
      .filter(dp => dp.tags?.region === region)
      .map(dp => dp.value);
  }
}
// K6 integration - collect metrics from K6 output
/**
 * Build a MetricsCollector from a k6 JSON-lines output file. 'Point'
 * entries are recorded; 'Metric' definition lines and malformed lines are
 * ignored. A read failure is logged and an empty collector is returned.
 */
export function collectFromK6Output(outputFile: string): MetricsCollector {
  const collector = new MetricsCollector();
  let raw: string;
  try {
    raw = fs.readFileSync(outputFile, 'utf-8');
  } catch (e) {
    console.error('Error reading K6 output:', e);
    return collector;
  }
  for (const line of raw.split('\n')) {
    if (!line.trim()) continue;
    try {
      const entry = JSON.parse(line);
      if (entry.type === 'Point') {
        collector.record(entry.metric, entry.data.value, entry.data.tags);
      }
      // 'Metric' definition lines carry no data points — nothing to record.
    } catch (e) {
      // Skip invalid lines
    }
  }
  return collector;
}
export default MetricsCollector;

View File

@@ -0,0 +1,663 @@
/**
* Results Analyzer for RuVector Benchmarks
*
* Performs statistical analysis, comparisons, and generates recommendations
*/
import * as fs from 'fs';
import * as path from 'path';
import { ComprehensiveMetrics, LatencyMetrics } from './metrics-collector';
// Analysis result types
/** Statistical summary and anomaly scan for one scenario run. */
export interface StatisticalAnalysis {
  scenario: string;
  summary: {
    totalRequests: number;
    successfulRequests: number;
    failedRequests: number;
    averageLatency: number;
    medianLatency: number;
    p99Latency: number;
    throughput: number;
    errorRate: number;
    availability: number;
  };
  distribution: {
    latencyHistogram: HistogramBucket[]; // currently always empty (no raw samples)
    throughputOverTime: TimeSeriesData[];
    errorRateOverTime: TimeSeriesData[];
  };
  correlation: {
    latencyVsThroughput: number;
    errorsVsLoad: number;
    resourceVsLatency: number;
  };
  anomalies: Anomaly[];
}
/** One histogram bucket over a latency range. */
export interface HistogramBucket {
  min: number;
  max: number;
  count: number;
  percentage: number;
}
/** A single (timestamp, value) sample. */
export interface TimeSeriesData {
  timestamp: number;
  value: number;
}
/** A detected irregularity in one metric's behavior. */
export interface Anomaly {
  type: 'spike' | 'drop' | 'plateau' | 'oscillation';
  metric: string;
  timestamp: number;
  severity: 'low' | 'medium' | 'high' | 'critical';
  description: string;
  impact: string;
}
/** Metric-by-metric percentage deltas between a baseline and current run. */
export interface Comparison {
  baseline: string;
  current: string;
  improvements: Record<string, number>; // metric -> % change
  regressions: Record<string, number>;
  summary: string;
}
/** A component operating past its threshold, with remediation advice. */
export interface Bottleneck {
  component: string;
  metric: string;
  severity: 'low' | 'medium' | 'high' | 'critical';
  currentValue: number;
  threshold: number;
  impact: string;
  recommendation: string;
}
/** A prioritized improvement suggestion derived from the analysis. */
export interface Recommendation {
  category: 'performance' | 'scalability' | 'reliability' | 'cost';
  priority: 'low' | 'medium' | 'high' | 'critical';
  title: string;
  description: string;
  implementation: string;
  estimatedImpact: string;
  estimatedCost: number;
}
/** Top-level output of ResultsAnalyzer.generateReport(). */
export interface AnalysisReport {
  testId: string;
  scenario: string;
  timestamp: number;
  statistical: StatisticalAnalysis;
  slaCompliance: SLACompliance;
  bottlenecks: Bottleneck[];
  recommendations: Recommendation[];
  comparison?: Comparison; // present only when a baseline was supplied
  score: {
    performance: number; // 0-100
    reliability: number;
    scalability: number;
    efficiency: number;
    overall: number;
  };
}
/** SLA pass/fail detail for latency, availability and error rate. */
export interface SLACompliance {
  met: boolean; // true only when all three targets below are met
  details: {
    latency: {
      target: number;
      actual: number;
      met: boolean;
    };
    availability: {
      target: number;
      actual: number;
      met: boolean;
    };
    errorRate: {
      target: number;
      actual: number;
      met: boolean;
    };
  };
  violations: Array<{
    metric: string;
    timestamp: number;
    duration: number;
    severity: string;
  }>;
}
// Results analyzer class
export class ResultsAnalyzer {
private outputDir: string;
  /** @param outputDir Directory used by save() when writing report files. */
  constructor(outputDir: string = './results') {
    this.outputDir = outputDir;
  }
// Perform statistical analysis
analyzeStatistics(metrics: ComprehensiveMetrics): StatisticalAnalysis {
const totalRequests = metrics.throughput.queriesPerSecond * (metrics.duration / 1000);
const failedRequests = metrics.errors.totalErrors;
const successfulRequests = totalRequests - failedRequests;
return {
scenario: metrics.scenario,
summary: {
totalRequests,
successfulRequests,
failedRequests,
averageLatency: metrics.latency.mean,
medianLatency: metrics.latency.median,
p99Latency: metrics.latency.p99,
throughput: metrics.throughput.queriesPerSecond,
errorRate: metrics.errors.errorRate,
availability: metrics.availability.uptime,
},
distribution: {
latencyHistogram: this.createLatencyHistogram(metrics.latency),
throughputOverTime: [], // TODO: Extract from time series
errorRateOverTime: [], // TODO: Extract from time series
},
correlation: {
latencyVsThroughput: 0, // TODO: Calculate correlation
errorsVsLoad: 0,
resourceVsLatency: 0,
},
anomalies: this.detectAnomalies(metrics),
};
}
// Create latency histogram
private createLatencyHistogram(latency: LatencyMetrics): HistogramBucket[] {
// NOTE: This function cannot create accurate histograms without raw latency samples.
// We only have percentile data (p50, p95, p99), which is insufficient for distribution.
// Returning empty histogram to avoid fabricating data.
console.warn(
'Cannot generate latency histogram without raw sample data. ' +
'Only percentile metrics (p50, p95, p99) are available. ' +
'To get accurate histograms, modify metrics collection to store raw latency samples.'
);
return []; // Return empty array instead of fabricated data
}
  // Detect anomalies
  // Heuristic scan of the aggregate report; all thresholds are hard-coded.
  // NOTE(review): every anomaly is stamped with metrics.endTime because the
  // aggregate report carries no per-sample timestamps.
  private detectAnomalies(metrics: ComprehensiveMetrics): Anomaly[] {
    const anomalies: Anomaly[] = [];
    // Latency spikes: p99 more than 5x the mean indicates a heavy tail.
    if (metrics.latency.p99 > metrics.latency.mean * 5) {
      anomalies.push({
        type: 'spike',
        metric: 'latency',
        timestamp: metrics.endTime,
        severity: 'high',
        description: `P99 latency (${metrics.latency.p99}ms) is 5x higher than mean (${metrics.latency.mean}ms)`,
        impact: 'Users experiencing slow responses',
      });
    }
    // Error rate spikes: anything above 1% is treated as critical.
    if (metrics.errors.errorRate > 1) {
      anomalies.push({
        type: 'spike',
        metric: 'error_rate',
        timestamp: metrics.endTime,
        severity: 'critical',
        description: `Error rate (${metrics.errors.errorRate}%) exceeds acceptable threshold`,
        impact: 'Service degradation affecting users',
      });
    }
    // Throughput drops: average below half of the observed peak.
    if (metrics.throughput.averageQPS < metrics.throughput.peakQPS * 0.5) {
      anomalies.push({
        type: 'drop',
        metric: 'throughput',
        timestamp: metrics.endTime,
        severity: 'medium',
        description: 'Throughput dropped below 50% of peak capacity',
        impact: 'Reduced capacity affecting scalability',
      });
    }
    // Resource saturation: CPU peak above 90%.
    if (metrics.resources.cpu.peak > 90) {
      anomalies.push({
        type: 'plateau',
        metric: 'cpu',
        timestamp: metrics.endTime,
        severity: 'high',
        description: `CPU utilization at ${metrics.resources.cpu.peak}%`,
        impact: 'System approaching capacity limits',
      });
    }
    return anomalies;
  }
// Check SLA compliance
checkSLACompliance(metrics: ComprehensiveMetrics): SLACompliance {
const latencyTarget = 50; // p99 < 50ms
const availabilityTarget = 99.99; // 99.99% uptime
const errorRateTarget = 0.01; // < 0.01% errors
const latencyMet = metrics.latency.p99 < latencyTarget;
const availabilityMet = metrics.availability.uptime >= availabilityTarget;
const errorRateMet = metrics.errors.errorRate < errorRateTarget;
const violations: Array<{
metric: string;
timestamp: number;
duration: number;
severity: string;
}> = [];
if (!latencyMet) {
violations.push({
metric: 'latency',
timestamp: metrics.endTime,
duration: metrics.duration,
severity: 'high',
});
}
if (!availabilityMet) {
violations.push({
metric: 'availability',
timestamp: metrics.endTime,
duration: metrics.duration,
severity: 'critical',
});
}
if (!errorRateMet) {
violations.push({
metric: 'error_rate',
timestamp: metrics.endTime,
duration: metrics.duration,
severity: 'high',
});
}
return {
met: latencyMet && availabilityMet && errorRateMet,
details: {
latency: {
target: latencyTarget,
actual: metrics.latency.p99,
met: latencyMet,
},
availability: {
target: availabilityTarget,
actual: metrics.availability.uptime,
met: availabilityMet,
},
errorRate: {
target: errorRateTarget,
actual: metrics.errors.errorRate,
met: errorRateMet,
},
},
violations,
};
}
// Identify bottlenecks
identifyBottlenecks(metrics: ComprehensiveMetrics): Bottleneck[] {
const bottlenecks: Bottleneck[] = [];
// CPU bottleneck
if (metrics.resources.cpu.average > 80) {
bottlenecks.push({
component: 'compute',
metric: 'cpu_utilization',
severity: 'high',
currentValue: metrics.resources.cpu.average,
threshold: 80,
impact: 'High CPU usage limiting throughput and increasing latency',
recommendation: 'Scale horizontally or optimize CPU-intensive operations',
});
}
// Memory bottleneck
if (metrics.resources.memory.average > 85) {
bottlenecks.push({
component: 'memory',
metric: 'memory_utilization',
severity: 'high',
currentValue: metrics.resources.memory.average,
threshold: 85,
impact: 'Memory pressure may cause swapping and degraded performance',
recommendation: 'Increase memory allocation or optimize memory usage',
});
}
// Network bottleneck
if (metrics.resources.network.bandwidth > 8000000000) { // 8 Gbps
bottlenecks.push({
component: 'network',
metric: 'bandwidth',
severity: 'medium',
currentValue: metrics.resources.network.bandwidth,
threshold: 8000000000,
impact: 'Network bandwidth saturation affecting data transfer',
recommendation: 'Upgrade network capacity or implement compression',
});
}
// Latency bottleneck
if (metrics.latency.p99 > 100) {
bottlenecks.push({
component: 'latency',
metric: 'p99_latency',
severity: 'critical',
currentValue: metrics.latency.p99,
threshold: 50,
impact: 'High tail latency affecting user experience',
recommendation: 'Optimize query processing, add caching, or improve indexing',
});
}
// Regional imbalance
const regionalLatencies = metrics.regional.map(r => r.latency.mean);
const maxRegionalLatency = Math.max(...regionalLatencies);
const minRegionalLatency = Math.min(...regionalLatencies);
if (maxRegionalLatency > minRegionalLatency * 2) {
bottlenecks.push({
component: 'regional_distribution',
metric: 'latency_variance',
severity: 'medium',
currentValue: maxRegionalLatency / minRegionalLatency,
threshold: 2,
impact: 'Uneven regional performance affecting global users',
recommendation: 'Rebalance load across regions or add capacity to slow regions',
});
}
return bottlenecks;
}
  // Generate recommendations
  /**
   * Translate metrics and identified bottlenecks into prioritized,
   * actionable recommendations. estimatedCost values are rough figures
   * (units not defined here — presumably USD; confirm with the cost model).
   */
  generateRecommendations(
    metrics: ComprehensiveMetrics,
    bottlenecks: Bottleneck[]
  ): Recommendation[] {
    const recommendations: Recommendation[] = [];
    // Performance recommendations (p99 beyond the 50ms SLA target)
    if (metrics.latency.p99 > 50) {
      recommendations.push({
        category: 'performance',
        priority: 'high',
        title: 'Optimize Query Latency',
        description: 'P99 latency exceeds target of 50ms',
        implementation: 'Add query result caching, optimize vector indexing (HNSW tuning), implement query batching',
        estimatedImpact: '30-50% latency reduction',
        estimatedCost: 5000,
      });
    }
    // Scalability recommendations (triggered by a compute bottleneck)
    if (bottlenecks.some(b => b.component === 'compute')) {
      recommendations.push({
        category: 'scalability',
        priority: 'high',
        title: 'Scale Compute Capacity',
        description: 'CPU utilization consistently high',
        implementation: 'Increase pod replicas, enable auto-scaling, or upgrade instance types',
        estimatedImpact: '100% throughput increase',
        estimatedCost: 10000,
      });
    }
    // Reliability recommendations (error rate beyond the 0.01% SLA)
    if (metrics.errors.errorRate > 0.01) {
      recommendations.push({
        category: 'reliability',
        priority: 'critical',
        title: 'Improve Error Handling',
        description: 'Error rate exceeds acceptable threshold',
        implementation: 'Add circuit breakers, implement retry logic with backoff, improve health checks',
        estimatedImpact: '80% error reduction',
        estimatedCost: 3000,
      });
    }
    // Cost optimization (cost per million queries above 0.50)
    if (metrics.costs.costPerMillionQueries > 0.50) {
      recommendations.push({
        category: 'cost',
        priority: 'medium',
        title: 'Optimize Infrastructure Costs',
        description: 'Cost per million queries higher than target',
        implementation: 'Use spot instances, implement aggressive caching, optimize resource allocation',
        estimatedImpact: '40% cost reduction',
        estimatedCost: 2000,
      });
    }
    // Regional optimization (triggered by a regional-distribution bottleneck)
    if (bottlenecks.some(b => b.component === 'regional_distribution')) {
      recommendations.push({
        category: 'performance',
        priority: 'medium',
        title: 'Balance Regional Load',
        description: 'Significant latency variance across regions',
        implementation: 'Rebalance traffic with intelligent routing, add capacity to slow regions',
        estimatedImpact: '25% improvement in global latency',
        estimatedCost: 8000,
      });
    }
    return recommendations;
  }
// Calculate performance score
calculateScore(metrics: ComprehensiveMetrics, sla: SLACompliance): {
performance: number;
reliability: number;
scalability: number;
efficiency: number;
overall: number;
} {
// Performance score (based on latency)
const latencyScore = Math.max(0, 100 - (metrics.latency.p99 / 50) * 100);
const throughputScore = Math.min(100, (metrics.throughput.queriesPerSecond / 50000000) * 100);
const performance = (latencyScore + throughputScore) / 2;
// Reliability score (based on availability and error rate)
const availabilityScore = metrics.availability.uptime;
const errorScore = Math.max(0, 100 - metrics.errors.errorRate * 100);
const reliability = (availabilityScore + errorScore) / 2;
// Scalability score (based on resource utilization)
const cpuScore = Math.max(0, 100 - metrics.resources.cpu.average);
const memoryScore = Math.max(0, 100 - metrics.resources.memory.average);
const scalability = (cpuScore + memoryScore) / 2;
// Efficiency score (based on cost)
const costScore = Math.max(0, 100 - (metrics.costs.costPerMillionQueries / 0.10) * 10);
const efficiency = costScore;
// Overall score (weighted average)
const overall = (
performance * 0.35 +
reliability * 0.35 +
scalability * 0.20 +
efficiency * 0.10
);
return {
performance: Math.round(performance),
reliability: Math.round(reliability),
scalability: Math.round(scalability),
efficiency: Math.round(efficiency),
overall: Math.round(overall),
};
}
// Compare two test results
//
// Computes the percentage change of key metrics (p99 latency, throughput,
// error rate) between a baseline run and the current run, bucketing each
// change as an improvement or a regression, then produces a one-line summary.
//
// Fix: the original divided by the baseline value unconditionally, so a
// zero baseline (e.g. a run with zero errors) produced NaN (0/0) or
// Infinity in the output. A metric whose baseline is zero is now skipped,
// since its percentage change is undefined.
compare(baseline: ComprehensiveMetrics, current: ComprehensiveMetrics): Comparison {
  const improvements: Record<string, number> = {};
  const regressions: Record<string, number> = {};

  // Percentage change from `before` to `after`; null when undefined (zero baseline).
  const percentChange = (before: number, after: number): number | null =>
    before === 0 ? null : ((after - before) / before) * 100;

  // Record a change under `name`. For lower-is-better metrics (latency,
  // error rate) a negative change is an improvement; for higher-is-better
  // metrics (throughput) a positive change is. Zero change counts as a
  // regression of 0, matching the original bucketing.
  const record = (name: string, change: number | null, lowerIsBetter: boolean): void => {
    if (change === null) return; // undefined percentage — skip metric
    const improved = lowerIsBetter ? change < 0 : change > 0;
    if (improved) {
      improvements[name] = Math.abs(change);
    } else {
      regressions[name] = Math.abs(change);
    }
  };

  record('p99_latency', percentChange(baseline.latency.p99, current.latency.p99), true);
  record('throughput', percentChange(baseline.throughput.queriesPerSecond, current.throughput.queriesPerSecond), false);
  record('error_rate', percentChange(baseline.errors.errorRate, current.errors.errorRate), true);

  // Summarize by comparing bucket sizes.
  const improvementCount = Object.keys(improvements).length;
  const regressionCount = Object.keys(regressions).length;
  let summary = '';
  if (improvementCount > regressionCount) {
    summary = `Overall improvement: ${improvementCount} metrics improved, ${regressionCount} regressed`;
  } else if (regressionCount > improvementCount) {
    summary = `Overall regression: ${regressionCount} metrics regressed, ${improvementCount} improved`;
  } else {
    summary = 'Mixed results: equal improvements and regressions';
  }

  return {
    baseline: baseline.scenario,
    current: current.scenario,
    improvements,
    regressions,
    summary,
  };
}
// Generate full analysis report
//
// Assembles every analysis product — statistics, SLA compliance, bottleneck
// detection, recommendations, the composite score, and (when a baseline run
// is supplied) a baseline comparison — into a single AnalysisReport.
generateReport(metrics: ComprehensiveMetrics, baseline?: ComprehensiveMetrics): AnalysisReport {
  const stats = this.analyzeStatistics(metrics);
  const sla = this.checkSLACompliance(metrics);
  const bottlenecks = this.identifyBottlenecks(metrics);
  const recommendations = this.generateRecommendations(metrics, bottlenecks);
  const score = this.calculateScore(metrics, sla);
  // Comparison only makes sense when a baseline run was provided.
  const comparison = baseline !== undefined ? this.compare(baseline, metrics) : undefined;

  return {
    testId: metrics.testId,
    scenario: metrics.scenario,
    timestamp: Date.now(),
    statistical: stats,
    slaCompliance: sla,
    bottlenecks,
    recommendations,
    comparison,
    score,
  };
}
// Save analysis report
//
// Serializes the report as pretty-printed JSON (2-space indent) into
// `filename` under the analyzer's output directory and logs the destination.
save(filename: string, report: AnalysisReport): void {
  const destination = path.join(this.outputDir, filename);
  const serialized = JSON.stringify(report, null, 2);
  fs.writeFileSync(destination, serialized);
  console.log(`Analysis report saved to ${destination}`);
}
// Generate markdown report
//
// Renders the analysis report as a human-readable Markdown document with
// sections for the executive summary, SLA compliance, bottlenecks,
// recommendations, and (when present) the baseline comparison. Built as a
// list of fragments joined at the end rather than repeated concatenation.
generateMarkdown(report: AnalysisReport): string {
  const parts: string[] = [];

  // Header
  parts.push(`# Benchmark Analysis Report\n\n`);
  parts.push(`**Test ID:** ${report.testId}\n`);
  parts.push(`**Scenario:** ${report.scenario}\n`);
  parts.push(`**Timestamp:** ${new Date(report.timestamp).toISOString()}\n\n`);

  // Executive Summary
  parts.push(`## Executive Summary\n\n`);
  parts.push(`**Overall Score:** ${report.score.overall}/100\n\n`);
  parts.push(`- Performance: ${report.score.performance}/100\n`);
  parts.push(`- Reliability: ${report.score.reliability}/100\n`);
  parts.push(`- Scalability: ${report.score.scalability}/100\n`);
  parts.push(`- Efficiency: ${report.score.efficiency}/100\n\n`);

  // SLA Compliance table
  const sla = report.slaCompliance;
  parts.push(`## SLA Compliance\n\n`);
  parts.push(`**Status:** ${sla.met ? '✅ PASSED' : '❌ FAILED'}\n\n`);
  parts.push(`| Metric | Target | Actual | Status |\n`);
  parts.push(`|--------|--------|--------|--------|\n`);
  parts.push(`| Latency (p99) | <${sla.details.latency.target}ms | ${sla.details.latency.actual.toFixed(2)}ms | ${sla.details.latency.met ? '✅' : '❌'} |\n`);
  parts.push(`| Availability | >${sla.details.availability.target}% | ${sla.details.availability.actual.toFixed(2)}% | ${sla.details.availability.met ? '✅' : '❌'} |\n`);
  parts.push(`| Error Rate | <${sla.details.errorRate.target}% | ${sla.details.errorRate.actual.toFixed(4)}% | ${sla.details.errorRate.met ? '✅' : '❌'} |\n\n`);

  // Bottlenecks (section omitted entirely when none were found)
  if (report.bottlenecks.length > 0) {
    parts.push(`## Bottlenecks\n\n`);
    for (const bottleneck of report.bottlenecks) {
      parts.push(`### ${bottleneck.component} - ${bottleneck.metric}\n`);
      parts.push(`**Severity:** ${bottleneck.severity.toUpperCase()}\n`);
      parts.push(`**Current Value:** ${bottleneck.currentValue}\n`);
      parts.push(`**Threshold:** ${bottleneck.threshold}\n`);
      parts.push(`**Impact:** ${bottleneck.impact}\n`);
      parts.push(`**Recommendation:** ${bottleneck.recommendation}\n\n`);
    }
  }

  // Recommendations (section omitted entirely when none were generated)
  if (report.recommendations.length > 0) {
    parts.push(`## Recommendations\n\n`);
    for (const rec of report.recommendations) {
      parts.push(`### ${rec.title}\n`);
      parts.push(`**Priority:** ${rec.priority.toUpperCase()} | **Category:** ${rec.category}\n`);
      parts.push(`**Description:** ${rec.description}\n`);
      parts.push(`**Implementation:** ${rec.implementation}\n`);
      parts.push(`**Estimated Impact:** ${rec.estimatedImpact}\n`);
      parts.push(`**Estimated Cost:** $${rec.estimatedCost}\n\n`);
    }
  }

  // Comparison vs baseline (only present when the report was built with one)
  if (report.comparison) {
    parts.push(`## Comparison vs Baseline\n\n`);
    parts.push(`**Baseline:** ${report.comparison.baseline}\n`);
    parts.push(`**Current:** ${report.comparison.current}\n\n`);
    parts.push(`**Summary:** ${report.comparison.summary}\n\n`);

    const improvementEntries = Object.entries(report.comparison.improvements);
    if (improvementEntries.length > 0) {
      parts.push(`### Improvements\n`);
      for (const [metric, change] of improvementEntries) {
        parts.push(`- ${metric}: +${change.toFixed(2)}%\n`);
      }
      parts.push(`\n`);
    }

    const regressionEntries = Object.entries(report.comparison.regressions);
    if (regressionEntries.length > 0) {
      parts.push(`### Regressions\n`);
      for (const [metric, change] of regressionEntries) {
        parts.push(`- ${metric}: -${change.toFixed(2)}%\n`);
      }
      parts.push(`\n`);
    }
  }

  return parts.join('');
}
}
export default ResultsAnalyzer;