Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
This commit is contained in:
541
vendor/ruvector/examples/neural-trader/tests/neural-trader.test.js
vendored
Normal file
541
vendor/ruvector/examples/neural-trader/tests/neural-trader.test.js
vendored
Normal file
@@ -0,0 +1,541 @@
|
||||
/**
 * Neural-Trader Test Suite
 *
 * Comprehensive tests for all production modules:
 * Kelly sizing, LSTM-Transformer, DRL portfolio, sentiment,
 * backtesting, risk management, and the trading pipeline DAG.
 */

import { describe, it, expect, beforeEach } from '@jest/globals';

// Mock performance for Node.js environments where the global is absent.
// Date.now() is millisecond-resolution only — coarser than perf_hooks.
if (typeof performance === 'undefined') {
  global.performance = { now: () => Date.now() };
}

// ============================================================================
// FRACTIONAL KELLY TESTS
// ============================================================================

describe('Fractional Kelly Engine', () => {
  let KellyCriterion, TradingKelly, SportsBettingKelly;

  // Re-import before each test. NOTE(review): ESM dynamic imports are cached,
  // so isolation between tests relies on the modules being stateless — confirm.
  beforeEach(async () => {
    const module = await import('../production/fractional-kelly.js');
    KellyCriterion = module.KellyCriterion;
    TradingKelly = module.TradingKelly;
    SportsBettingKelly = module.SportsBettingKelly;
  });

  describe('KellyCriterion', () => {
    it('should calculate full Kelly correctly', () => {
      const kelly = new KellyCriterion();
      const result = kelly.calculateFullKelly(0.55, 2.0);
      expect(result).toBeCloseTo(0.10, 2); // 10% for 55% win rate at even odds
    });

    it('should return 0 for negative edge', () => {
      const kelly = new KellyCriterion();
      const result = kelly.calculateFullKelly(0.40, 2.0);
      expect(result).toBeLessThanOrEqual(0);
    });

    it('should apply fractional Kelly correctly', () => {
      const kelly = new KellyCriterion();
      // 'conservative' is presumably a named fraction preset — verify in module.
      const result = kelly.calculateFractionalKelly(0.55, 2.0, 'conservative');
      expect(result.stake).toBeLessThan(kelly.calculateFullKelly(0.55, 2.0) * 10000);
    });

    it('should handle edge cases', () => {
      const kelly = new KellyCriterion();
      expect(kelly.calculateFullKelly(0, 2.0)).toBeLessThanOrEqual(0);
      expect(kelly.calculateFullKelly(1, 2.0)).toBeGreaterThan(0);
    });
  });

  describe('TradingKelly', () => {
    it('should calculate position size', () => {
      const kelly = new TradingKelly();
      const result = kelly.calculatePositionSize(100000, 0.55, 0.02, 0.015);
      expect(result.positionSize).toBeGreaterThan(0);
      expect(result.positionSize).toBeLessThan(100000);
    });

    it('should respect max position limits', () => {
      const kelly = new TradingKelly();
      // Extreme edge (99% win prob) must still be capped at 20% of equity.
      const result = kelly.calculatePositionSize(100000, 0.99, 0.10, 0.01);
      expect(result.positionSize).toBeLessThanOrEqual(100000 * 0.20);
    });
  });
});

// ============================================================================
// LSTM-TRANSFORMER TESTS
// ============================================================================

describe('Hybrid LSTM-Transformer', () => {
  let HybridLSTMTransformer, FeatureExtractor, LSTMCell;

  beforeEach(async () => {
    const module = await import('../production/hybrid-lstm-transformer.js');
    HybridLSTMTransformer = module.HybridLSTMTransformer;
    FeatureExtractor = module.FeatureExtractor;
    LSTMCell = module.LSTMCell;
  });

  describe('LSTMCell', () => {
    it('should forward pass correctly', () => {
      const cell = new LSTMCell(10, 64);
      const x = new Array(10).fill(0.1);
      const h = new Array(64).fill(0);
      const c = new Array(64).fill(0);

      const result = cell.forward(x, h, c);
      expect(result.h).toHaveLength(64);
      expect(result.c).toHaveLength(64);
      expect(result.h.every(v => !isNaN(v))).toBe(true);
    });

    it('should handle zero inputs', () => {
      const cell = new LSTMCell(10, 64);
      const x = new Array(10).fill(0);
      const h = new Array(64).fill(0);
      const c = new Array(64).fill(0);

      const result = cell.forward(x, h, c);
      expect(result.h.every(v => isFinite(v))).toBe(true);
    });
  });

  describe('FeatureExtractor', () => {
    it('should extract features from candles', () => {
      const extractor = new FeatureExtractor();
      const candles = [];
      for (let i = 0; i < 100; i++) {
        candles.push({
          open: 100 + Math.random() * 10,
          high: 105 + Math.random() * 10,
          low: 95 + Math.random() * 10,
          close: 100 + Math.random() * 10,
          volume: 1000000
        });
      }

      const features = extractor.extract(candles);
      expect(features.length).toBe(99); // One less than candles
      expect(features[0].length).toBe(10); // 10 features
    });
  });

  describe('HybridLSTMTransformer', () => {
    it('should produce valid predictions', () => {
      const model = new HybridLSTMTransformer();
      const features = [];
      for (let i = 0; i < 50; i++) {
        features.push(new Array(10).fill(0).map(() => Math.random() - 0.5));
      }

      const result = model.predict(features);
      expect(result).toHaveProperty('prediction');
      expect(result).toHaveProperty('confidence');
      expect(result).toHaveProperty('signal');
      expect(['BUY', 'SELL', 'HOLD']).toContain(result.signal);
    });
  });
});

// ============================================================================
// DRL PORTFOLIO MANAGER TESTS
// ============================================================================

describe('DRL Portfolio Manager', () => {
  let EnsemblePortfolioManager, ReplayBuffer, NeuralNetwork;

  beforeEach(async () => {
    const module = await import('../production/drl-portfolio-manager.js');
    EnsemblePortfolioManager = module.EnsemblePortfolioManager;
    ReplayBuffer = module.ReplayBuffer;
    NeuralNetwork = module.NeuralNetwork;
  });

  describe('ReplayBuffer', () => {
    it('should store and sample experiences', () => {
      const buffer = new ReplayBuffer(100);

      // push(state, action, reward, nextState, done)
      for (let i = 0; i < 50; i++) {
        buffer.push(
          new Array(10).fill(i),
          new Array(5).fill(0.2),
          Math.random(),
          new Array(10).fill(i + 1),
          false
        );
      }

      expect(buffer.length).toBe(50);
      const batch = buffer.sample(16);
      expect(batch).toHaveLength(16);
    });

    it('should respect max size', () => {
      const buffer = new ReplayBuffer(10);

      for (let i = 0; i < 20; i++) {
        buffer.push([i], [0.5], 1, [i + 1], false);
      }

      expect(buffer.length).toBe(10);
    });
  });

  describe('NeuralNetwork', () => {
    it('should forward pass correctly', () => {
      const net = new NeuralNetwork([10, 32, 5]);
      const input = new Array(10).fill(0.5);
      const result = net.forward(input);

      expect(result.output).toHaveLength(5);
      expect(result.output.every(v => isFinite(v))).toBe(true);
    });
  });

  describe('EnsemblePortfolioManager', () => {
    it('should produce valid portfolio weights', () => {
      const manager = new EnsemblePortfolioManager(5);
      // NOTE(review): state dimension 62 appears to be the manager's expected
      // input size for 5 assets — confirm against the module's state encoder.
      const state = new Array(62).fill(0).map(() => Math.random());
      const action = manager.getEnsembleAction(state);

      expect(action).toHaveLength(5);
      const sum = action.reduce((a, b) => a + b, 0);
      expect(sum).toBeCloseTo(1, 1); // Weights should sum to ~1
    });
  });
});

// ============================================================================
// SENTIMENT ALPHA TESTS
// ============================================================================

describe('Sentiment Alpha Pipeline', () => {
  let LexiconAnalyzer, EmbeddingAnalyzer, SentimentAggregator;

  beforeEach(async () => {
    const module = await import('../production/sentiment-alpha.js');
    LexiconAnalyzer = module.LexiconAnalyzer;
    EmbeddingAnalyzer = module.EmbeddingAnalyzer;
    SentimentAggregator = module.SentimentAggregator;
  });

  describe('LexiconAnalyzer', () => {
    it('should detect positive sentiment', () => {
      const analyzer = new LexiconAnalyzer();
      const result = analyzer.analyze('Strong growth and profit beat expectations');
      expect(result.score).toBeGreaterThan(0);
      expect(result.positiveCount).toBeGreaterThan(0);
    });

    it('should detect negative sentiment', () => {
      const analyzer = new LexiconAnalyzer();
      const result = analyzer.analyze('Losses and decline amid recession fears');
      expect(result.score).toBeLessThan(0);
      expect(result.negativeCount).toBeGreaterThan(0);
    });

    it('should handle neutral text', () => {
      const analyzer = new LexiconAnalyzer();
      const result = analyzer.analyze('The company released quarterly results');
      expect(Math.abs(result.score)).toBeLessThan(0.5);
    });

    it('should handle negators', () => {
      const analyzer = new LexiconAnalyzer();
      const positive = analyzer.analyze('Strong growth');
      const negated = analyzer.analyze('Not strong growth');
      expect(negated.score).toBeLessThan(positive.score);
    });
  });

  describe('SentimentAggregator', () => {
    it('should aggregate multiple sentiments', () => {
      const aggregator = new SentimentAggregator();

      aggregator.addSentiment('AAPL', { source: 'news', score: 0.5, confidence: 0.8 });
      aggregator.addSentiment('AAPL', { source: 'social', score: 0.3, confidence: 0.6 });

      const result = aggregator.getAggregatedSentiment('AAPL');
      expect(result.score).toBeGreaterThan(0);
      expect(result.count).toBe(2);
    });
  });
});

// ============================================================================
// BACKTESTING TESTS
// ============================================================================

describe('Backtesting Framework', () => {
  let PerformanceMetrics, BacktestEngine;

  beforeEach(async () => {
    const module = await import('../system/backtesting.js');
    PerformanceMetrics = module.PerformanceMetrics;
    BacktestEngine = module.BacktestEngine;
  });

  describe('PerformanceMetrics', () => {
    it('should calculate metrics from equity curve', () => {
      // 0.05 is presumably the annual risk-free rate — confirm in module.
      const metrics = new PerformanceMetrics(0.05);
      const equityCurve = [100000];

      // Generate random walk with a slight upward drift (0.48 offset).
      for (let i = 0; i < 252; i++) {
        const lastValue = equityCurve[equityCurve.length - 1];
        equityCurve.push(lastValue * (1 + (Math.random() - 0.48) * 0.02));
      }

      const result = metrics.calculate(equityCurve);

      expect(result).toHaveProperty('totalReturn');
      expect(result).toHaveProperty('sharpeRatio');
      expect(result).toHaveProperty('maxDrawdown');
      expect(result.tradingDays).toBe(252);
    });

    it('should handle edge cases', () => {
      const metrics = new PerformanceMetrics();

      // Empty curve
      expect(metrics.calculate([]).totalReturn).toBe(0);

      // Single value
      expect(metrics.calculate([100]).totalReturn).toBe(0);
    });

    it('should compute drawdown correctly', () => {
      const metrics = new PerformanceMetrics();
      const equityCurve = [100, 110, 105, 95, 100, 90, 95];

      const result = metrics.calculate(equityCurve);
      expect(result.maxDrawdown).toBeCloseTo(0.182, 2); // (110-90)/110
    });
  });
});

// ============================================================================
// RISK MANAGEMENT TESTS
// ============================================================================

describe('Risk Management', () => {
  let RiskManager, CircuitBreaker, StopLossManager;

  beforeEach(async () => {
    const module = await import('../system/risk-management.js');
    RiskManager = module.RiskManager;
    CircuitBreaker = module.CircuitBreaker;
    StopLossManager = module.StopLossManager;
  });

  describe('StopLossManager', () => {
    it('should set and check stops', () => {
      const manager = new StopLossManager();
      manager.setStop('AAPL', 150, 'fixed');

      const check = manager.checkStop('AAPL', 140);
      expect(check.triggered).toBe(true);
    });

    it('should update trailing stops', () => {
      const manager = new StopLossManager();
      manager.setStop('AAPL', 100, 'trailing');

      manager.updateTrailingStop('AAPL', 110);
      const stop = manager.stops.get('AAPL');
      expect(stop.highWaterMark).toBe(110);
      // Stop should trail the new high above the original 3%-below level.
      expect(stop.stopPrice).toBeGreaterThan(100 * 0.97);
    });
  });

  describe('CircuitBreaker', () => {
    it('should trip on consecutive losses', () => {
      const breaker = new CircuitBreaker({ consecutiveLosses: 3 });

      breaker.recordTrade(-100);
      breaker.recordTrade(-100);
      expect(breaker.canTrade().allowed).toBe(true);

      breaker.recordTrade(-100);
      expect(breaker.canTrade().allowed).toBe(false);
    });

    it('should reset after cooldown', () => {
      const breaker = new CircuitBreaker({
        consecutiveLosses: 2,
        drawdownCooldown: 100 // 100ms for testing
      });

      breaker.recordTrade(-100);
      breaker.recordTrade(-100);
      expect(breaker.canTrade().allowed).toBe(false);

      // Wait for cooldown; returning the Promise makes Jest await it.
      return new Promise(resolve => {
        setTimeout(() => {
          expect(breaker.canTrade().allowed).toBe(true);
          resolve();
        }, 150);
      });
    });

    it('should trip on drawdown', () => {
      const breaker = new CircuitBreaker({ drawdownThreshold: 0.10 });

      breaker.updateEquity(100000);
      breaker.updateEquity(88000); // 12% drawdown

      expect(breaker.canTrade().allowed).toBe(false);
    });
  });

  describe('RiskManager', () => {
    it('should check trade limits', () => {
      const manager = new RiskManager();
      manager.startDay(100000);

      const portfolio = {
        equity: 100000,
        cash: 50000,
        positions: {}
      };

      const trade = { symbol: 'AAPL', side: 'buy', value: 5000 };
      const result = manager.canTrade('AAPL', trade, portfolio);

      expect(result.allowed).toBe(true);
    });

    it('should block oversized trades', () => {
      const manager = new RiskManager();

      const portfolio = {
        equity: 100000,
        cash: 50000,
        positions: {}
      };

      // 60% of equity in a single position should violate position-size limits.
      const trade = { symbol: 'AAPL', side: 'buy', value: 60000 };
      const result = manager.canTrade('AAPL', trade, portfolio);

      expect(result.checks.positionSize.violations.length).toBeGreaterThan(0);
    });
  });
});

// ============================================================================
// TRADING PIPELINE TESTS
// ============================================================================

describe('Trading Pipeline', () => {
  let TradingPipeline, TradingDag;

  beforeEach(async () => {
    const module = await import('../system/trading-pipeline.js');
    TradingPipeline = module.TradingPipeline;
    TradingDag = module.TradingDag;
  });

  describe('TradingDag', () => {
    it('should execute nodes in order', async () => {
      const dag = new TradingDag();
      const order = [];

      dag.addNode({
        id: 'a',
        name: 'Node A',
        dependencies: [],
        status: 'pending',
        executor: async () => { order.push('a'); return 'A'; }
      });

      dag.addNode({
        id: 'b',
        name: 'Node B',
        dependencies: ['a'],
        status: 'pending',
        executor: async (ctx, deps) => { order.push('b'); return deps.a + 'B'; }
      });

      await dag.execute({});

      expect(order).toEqual(['a', 'b']);
      expect(dag.results.get('b')).toBe('AB');
    });

    it('should execute parallel nodes concurrently', async () => {
      const dag = new TradingDag({ parallelExecution: true, maxConcurrency: 2 });
      const timestamps = {};

      dag.addNode({
        id: 'a',
        dependencies: [],
        status: 'pending',
        executor: async () => { timestamps.a = Date.now(); return 'A'; }
      });

      dag.addNode({
        id: 'b',
        dependencies: [],
        status: 'pending',
        executor: async () => { timestamps.b = Date.now(); return 'B'; }
      });

      await dag.execute({});

      // Both should start at nearly the same time.
      // NOTE(review): 50ms is a timing-based threshold and may flake on
      // heavily loaded CI machines.
      expect(Math.abs(timestamps.a - timestamps.b)).toBeLessThan(50);
    });
  });
});

// ============================================================================
// INTEGRATION TESTS
// ============================================================================

describe('Integration Tests', () => {
  it('should run complete pipeline', async () => {
    const { createTradingPipeline } = await import('../system/trading-pipeline.js');
    const pipeline = createTradingPipeline();

    // Synthetic OHLCV data following a +/-1% random walk.
    const generateCandles = (n) => {
      const candles = [];
      let price = 100;
      for (let i = 0; i < n; i++) {
        price *= (1 + (Math.random() - 0.5) * 0.02);
        candles.push({
          open: price * 0.99,
          high: price * 1.01,
          low: price * 0.98,
          close: price,
          volume: 1000000
        });
      }
      return candles;
    };

    const context = {
      marketData: generateCandles(100),
      newsData: [
        { symbol: 'TEST', text: 'Strong earnings growth', source: 'news' }
      ],
      symbols: ['TEST'],
      portfolio: { equity: 100000, cash: 50000, positions: {}, assets: ['TEST'] },
      prices: { TEST: 100 }
    };

    const result = await pipeline.execute(context);

    expect(result).toHaveProperty('signals');
    expect(result).toHaveProperty('positions');
    expect(result).toHaveProperty('orders');
    expect(result).toHaveProperty('metrics');
  });
});

export {};
|
||||
489
vendor/ruvector/examples/neural-trader/tests/production-benchmark.js
vendored
Normal file
489
vendor/ruvector/examples/neural-trader/tests/production-benchmark.js
vendored
Normal file
@@ -0,0 +1,489 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Production Module Benchmark Suite
|
||||
*
|
||||
* Comprehensive benchmarks for:
|
||||
* - Fractional Kelly Engine
|
||||
* - Hybrid LSTM-Transformer
|
||||
* - DRL Portfolio Manager
|
||||
* - Sentiment Alpha Pipeline
|
||||
*
|
||||
* Measures: latency, throughput, accuracy, memory usage
|
||||
*/
|
||||
|
||||
import { performance } from 'perf_hooks';
|
||||
|
||||
// Benchmark configuration shared by every suite below.
const benchConfig = {
  iterations: 100,       // timed iterations per benchmark
  warmupIterations: 10,  // untimed passes so JIT warm-up doesn't skew stats
  dataPoints: {
    small: 100,
    medium: 500,
    large: 1000
  }
};

// Snapshot of V8 heap usage in MB, rounded to two decimal places.
function getMemoryMB() {
  const usage = process.memoryUsage();
  return {
    heap: Math.round(usage.heapUsed / 1024 / 1024 * 100) / 100,
    total: Math.round(usage.heapTotal / 1024 / 1024 * 100) / 100
  };
}

/**
 * Time `fn` over `iterations` runs after a warmup phase.
 *
 * Fixes vs. original: no longer throws a TypeError when `iterations < 1`
 * (the percentile lookups indexed an empty array), and the total elapsed
 * time is summed once instead of twice.
 *
 * @param {string} name - label reported in the result row
 * @param {Function} fn - sync or async workload; awaited each iteration
 * @param {number} [iterations] - timed iterations (default benchConfig.iterations)
 * @returns {Promise<object>} latency stats in ms (strings, 3 decimals),
 *   throughput in ops/sec, and heap delta in MB
 */
async function benchmark(name, fn, iterations = benchConfig.iterations) {
  // Warmup (untimed)
  for (let i = 0; i < benchConfig.warmupIterations; i++) {
    await fn();
  }

  // Collect garbage first (when run with --expose-gc) so memDelta
  // reflects the workload rather than leftover allocations.
  if (global.gc) global.gc();
  const memBefore = getMemoryMB();
  const times = [];

  for (let i = 0; i < iterations; i++) {
    const start = performance.now();
    await fn();
    times.push(performance.now() - start);
  }

  const memAfter = getMemoryMB();
  const memDelta = (memAfter.heap - memBefore.heap).toFixed(2);
  times.sort((a, b) => a - b);

  // Guard: with zero timed iterations there are no samples to index.
  if (times.length === 0) {
    return {
      name,
      iterations,
      min: '0.000',
      max: '0.000',
      mean: '0.000',
      median: '0.000',
      p95: '0.000',
      p99: '0.000',
      throughput: '0.0',
      memDelta
    };
  }

  // Sum once; the original recomputed this reduce for mean and throughput.
  const totalMs = times.reduce((a, b) => a + b, 0);

  return {
    name,
    iterations,
    min: times[0].toFixed(3),
    max: times[times.length - 1].toFixed(3),
    mean: (totalMs / times.length).toFixed(3),
    median: times[Math.floor(times.length / 2)].toFixed(3),
    p95: times[Math.floor(times.length * 0.95)].toFixed(3),
    p99: times[Math.floor(times.length * 0.99)].toFixed(3),
    throughput: (iterations / (totalMs / 1000)).toFixed(1),
    memDelta
  };
}
|
||||
|
||||
// ============= Kelly Criterion Benchmarks =============
|
||||
function benchmarkKelly() {
  // Self-contained Kelly implementation so the benchmark measures only this
  // code path, not the production module's import graph.
  class KellyCriterion {
    // Full Kelly fraction f* = (b*p - q)/b, floored at zero (never size a
    // negative-edge bet).
    calculateFullKelly(winProbability, decimalOdds) {
      const netOdds = decimalOdds - 1;
      const loseProb = 1 - winProbability;
      return Math.max(0, (netOdds * winProbability - loseProb) / netOdds);
    }

    // Fractional Kelly: scale the full fraction down and hard-cap at 5% of
    // bankroll. Stake is expressed against a notional 10,000-unit bankroll.
    calculateFractionalKelly(winProbability, decimalOdds, fraction = 0.2) {
      const fullKelly = this.calculateFullKelly(winProbability, decimalOdds);
      if (fullKelly <= 0) {
        return { stake: 0, edge: 0 };
      }

      const cappedFraction = Math.min(fullKelly * fraction, 0.05);
      const rawEdge = (winProbability * decimalOdds) - 1;

      return {
        stake: cappedFraction * 10000,
        stakePercent: cappedFraction * 100,
        edge: rawEdge * 100
      };
    }

    // Size a slate of simultaneous bets, scaling every stake down pro rata
    // when the combined allocation would exceed 5% of bankroll.
    calculateMultiBetKelly(bets, fraction = 0.2) {
      const sized = [];
      for (const bet of bets) {
        sized.push({
          ...bet,
          kelly: this.calculateFractionalKelly(bet.winProbability, bet.decimalOdds, fraction)
        });
      }

      let totalPercent = 0;
      for (const entry of sized) {
        totalPercent += entry.kelly.stakePercent || 0;
      }
      const scaleFactor = totalPercent > 5 ? 5 / totalPercent : 1;

      return sized.map((entry) => ({
        ...entry,
        kelly: {
          ...entry.kelly,
          stake: (entry.kelly.stake || 0) * scaleFactor
        }
      }));
    }
  }

  const engine = new KellyCriterion();
  // Random bet in the 50-60% win-probability / 1.8-2.2 odds band.
  const randomBet = () => ({
    winProbability: 0.5 + Math.random() * 0.1,
    decimalOdds: 1.8 + Math.random() * 0.4
  });

  return {
    single: () => engine.calculateFractionalKelly(0.55, 2.0),
    multi10: () => engine.calculateMultiBetKelly(Array.from({ length: 10 }, randomBet)),
    multi100: () => engine.calculateMultiBetKelly(Array.from({ length: 100 }, randomBet))
  };
}
|
||||
|
||||
// ============= LSTM-Transformer Benchmarks =============
|
||||
function benchmarkLSTMTransformer() {
  // Single-gate LSTM stand-in: one sigmoid projection whose output serves as
  // both hidden and cell state. Weights are Xavier-style random at build time.
  class LSTMCell {
    constructor(inputSize, hiddenSize) {
      this.inputSize = inputSize;
      this.hiddenSize = hiddenSize;
      const scale = Math.sqrt(2.0 / (inputSize + hiddenSize));
      this.Wf = Array.from({ length: hiddenSize }, () =>
        Array.from({ length: inputSize + hiddenSize }, () => (Math.random() - 0.5) * 2 * scale)
      );
    }

    // Logistic function with the argument clamped to avoid overflow.
    sigmoid(x) {
      const clamped = Math.max(-500, Math.min(500, x));
      return 1 / (1 + Math.exp(-clamped));
    }

    forward(x, hPrev) {
      const combined = [...x, ...hPrev];
      const h = [];
      for (const row of this.Wf) {
        let acc = 0;
        for (let j = 0; j < row.length; j++) {
          acc += row[j] * combined[j];
        }
        h.push(this.sigmoid(acc));
      }
      return { h, c: h };
    }
  }

  // Unrolls the cell across a sequence and returns the final hidden state.
  class LSTMLayer {
    constructor(inputSize, hiddenSize) {
      this.cell = new LSTMCell(inputSize, hiddenSize);
      this.hiddenSize = hiddenSize;
    }

    forward(sequence) {
      let hidden = new Array(this.hiddenSize).fill(0);
      for (const step of sequence) {
        hidden = this.cell.forward(step, hidden).h;
      }
      return hidden;
    }
  }

  // Numerically stable softmax (shift by max before exponentiating).
  function softmax(arr) {
    const peak = arr.reduce((m, v) => (v > m ? v : m), arr[0]);
    const exps = arr.map((v) => Math.exp(v - peak));
    const total = exps.reduce((a, b) => a + b, 0);
    return exps.map((v) => v / total);
  }

  // Scaled dot-product attention over row-vector sequences Q, K, V.
  function attention(Q, K, V, dim) {
    const scale = Math.sqrt(dim);
    const weights = Q.map((q) =>
      softmax(
        K.map((k) => {
          let dot = 0;
          for (let idx = 0; idx < q.length; idx++) {
            dot += q[idx] * k[idx];
          }
          return dot / scale;
        })
      )
    );
    return weights.map((row) =>
      V[0].map((_, col) => {
        let acc = 0;
        for (let r = 0; r < row.length; r++) {
          acc += row[r] * V[r][col];
        }
        return acc;
      })
    );
  }

  const lstm = new LSTMLayer(10, 64);
  const randomSequence = (steps, width) =>
    Array.from({ length: steps }, () =>
      Array.from({ length: width }, () => Math.random())
    );

  return {
    lstmSmall: () => lstm.forward(randomSequence(10, 10)),
    lstmMedium: () => lstm.forward(randomSequence(50, 10)),
    lstmLarge: () => lstm.forward(randomSequence(100, 10)),
    attention: () => {
      const seq = randomSequence(20, 64);
      return attention(seq, seq, seq, 64);
    }
  };
}
|
||||
|
||||
// ============= DRL Benchmarks =============
|
||||
function benchmarkDRL() {
  // Two-layer MLP: ReLU hidden layer, linear output, Xavier-style random init.
  class NeuralNetwork {
    constructor(inputDim, hiddenDim, outputDim) {
      const scale = Math.sqrt(2.0 / (inputDim + hiddenDim));
      const randWeight = () => (Math.random() - 0.5) * 2 * scale;
      this.W1 = Array.from({ length: inputDim }, () =>
        Array.from({ length: hiddenDim }, randWeight)
      );
      this.W2 = Array.from({ length: hiddenDim }, () =>
        Array.from({ length: outputDim }, randWeight)
      );
      this.inputDim = inputDim;
      this.hiddenDim = hiddenDim;
      this.outputDim = outputDim;
    }

    forward(input) {
      // Hidden layer accumulation, then ReLU.
      const hidden = new Array(this.hiddenDim).fill(0);
      for (let i = 0; i < this.hiddenDim; i++) {
        let acc = 0;
        for (let j = 0; j < this.inputDim; j++) {
          acc += input[j] * this.W1[j][i];
        }
        hidden[i] = Math.max(0, acc);
      }

      // Linear output layer.
      const output = new Array(this.outputDim).fill(0);
      for (let i = 0; i < this.outputDim; i++) {
        let acc = 0;
        for (let j = 0; j < this.hiddenDim; j++) {
          acc += hidden[j] * this.W2[j][i];
        }
        output[i] = acc;
      }

      return output;
    }
  }

  // Fixed-capacity circular buffer; sampling is uniform with replacement.
  class ReplayBuffer {
    constructor(capacity) {
      this.capacity = capacity;
      this.buffer = [];
      this.position = 0;
    }

    push(data) {
      // Grow until full, then overwrite the oldest slot.
      if (this.buffer.length < this.capacity) {
        this.buffer.push(null);
      }
      this.buffer[this.position] = data;
      this.position = (this.position + 1) % this.capacity;
    }

    sample(batchSize) {
      const count = Math.min(batchSize, this.buffer.length);
      const batch = [];
      for (let i = 0; i < count; i++) {
        const idx = Math.floor(Math.random() * this.buffer.length);
        batch.push(this.buffer[idx]);
      }
      return batch;
    }
  }

  const network = new NeuralNetwork(100, 128, 10);
  const buffer = new ReplayBuffer(10000);

  // Pre-fill so sampling benchmarks draw from a realistic population.
  for (let i = 0; i < 1000; i++) {
    buffer.push({ state: Array(100).fill(Math.random()), reward: Math.random() });
  }

  const randomState = () => Array.from({ length: 100 }, () => Math.random());

  return {
    networkForward: () => network.forward(randomState()),
    bufferSample: () => buffer.sample(64),
    bufferPush: () => buffer.push({ state: Array(100).fill(Math.random()), reward: Math.random() }),
    fullStep: () => {
      const state = randomState();
      const action = network.forward(state);
      buffer.push({ state, action, reward: Math.random() });
      return action;
    }
  };
}
|
||||
|
||||
// ============= Sentiment Benchmarks =============
|
||||
function benchmarkSentiment() {
|
||||
const positiveWords = new Set([
|
||||
'growth', 'profit', 'gains', 'bullish', 'upgrade', 'beat', 'exceeded',
|
||||
'outperform', 'strong', 'surge', 'rally', 'breakthrough', 'innovation'
|
||||
]);
|
||||
|
||||
const negativeWords = new Set([
|
||||
'loss', 'decline', 'bearish', 'downgrade', 'miss', 'below', 'weak',
|
||||
'underperform', 'crash', 'plunge', 'risk', 'concern', 'warning'
|
||||
]);
|
||||
|
||||
function lexiconAnalyze(text) {
|
||||
const words = text.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
|
||||
let score = 0;
|
||||
let count = 0;
|
||||
|
||||
for (const word of words) {
|
||||
if (positiveWords.has(word)) { score++; count++; }
|
||||
else if (negativeWords.has(word)) { score--; count++; }
|
||||
}
|
||||
|
||||
return {
|
||||
score: count > 0 ? score / count : 0,
|
||||
confidence: Math.min(1, count / 5)
|
||||
};
|
||||
}
|
||||
|
||||
function hashEmbed(text, dim = 64) {
|
||||
const words = text.toLowerCase().split(/\s+/);
|
||||
const embedding = new Array(dim).fill(0);
|
||||
|
||||
for (const word of words) {
|
||||
let hash = 0;
|
||||
for (let i = 0; i < word.length; i++) {
|
||||
hash = ((hash << 5) - hash) + word.charCodeAt(i);
|
||||
hash = hash & hash;
|
||||
}
|
||||
for (let i = 0; i < dim; i++) {
|
||||
embedding[i] += Math.sin(hash * (i + 1)) / words.length;
|
||||
}
|
||||
}
|
||||
|
||||
return embedding;
|
||||
}
|
||||
|
||||
const sampleTexts = [
|
||||
'Strong quarterly earnings beat analyst expectations with record revenue growth',
|
||||
'Company warns of significant losses amid declining market demand',
|
||||
'Neutral outlook as market conditions remain stable',
|
||||
'Major breakthrough innovation drives optimistic investor sentiment'
|
||||
];
|
||||
|
||||
return {
|
||||
lexiconSingle: () => lexiconAnalyze(sampleTexts[0]),
|
||||
lexiconBatch: () => sampleTexts.map(t => lexiconAnalyze(t)),
|
||||
embedSingle: () => hashEmbed(sampleTexts[0]),
|
||||
embedBatch: () => sampleTexts.map(t => hashEmbed(t)),
|
||||
fullPipeline: () => {
|
||||
const results = sampleTexts.map(text => ({
|
||||
lexicon: lexiconAnalyze(text),
|
||||
embedding: hashEmbed(text)
|
||||
}));
|
||||
// Aggregate
|
||||
const scores = results.map(r => 0.4 * r.lexicon.score + 0.6 * Math.tanh(
|
||||
r.embedding.reduce((a, b) => a + b, 0) * 0.1
|
||||
));
|
||||
return scores.reduce((a, b) => a + b, 0) / scores.length;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// ============= Main Benchmark Runner =============

/**
 * Runs the full production-module benchmark suite and prints a formatted
 * report to stdout.
 *
 * Sections: Fractional Kelly engine, hybrid LSTM-Transformer, DRL portfolio
 * manager, and the sentiment alpha pipeline. Each result object produced by
 * `benchmark()` is read here via its `name`, `mean` (ms, string) and
 * `throughput` (ops/s, string) fields.
 *
 * @returns {Promise<Array<object>>} every individual benchmark result, in
 *   execution order.
 */
async function runBenchmarks() {
  const heavyRule = '═'.repeat(70);
  const lightRule = '─'.repeat(70);

  // Prints a numbered section title with its underline.
  const sectionHeader = (title) => {
    console.log(title);
    console.log(lightRule);
  };

  // Prints one "  label Xms (Y/s)" metric row; `label` carries its own padding.
  const metricRow = (label, r) => {
    console.log(`${label}${r.mean}ms (${r.throughput}/s)`);
  };

  console.log(heavyRule);
  console.log('PRODUCTION MODULE BENCHMARK SUITE');
  console.log(heavyRule);
  console.log();
  console.log(`Iterations: ${benchConfig.iterations} | Warmup: ${benchConfig.warmupIterations}`);
  console.log();

  const collected = [];

  // 1. Kelly Criterion Benchmarks
  sectionHeader('1. FRACTIONAL KELLY ENGINE');

  const kellyBench = benchmarkKelly();

  const kellySingle = await benchmark('Kelly Single Bet', kellyBench.single, 1000);
  const kellyMulti10 = await benchmark('Kelly Multi (10 bets)', kellyBench.multi10);
  const kellyMulti100 = await benchmark('Kelly Multi (100 bets)', kellyBench.multi100);

  metricRow('  Single bet:    ', kellySingle);
  metricRow('  10 bets:       ', kellyMulti10);
  metricRow('  100 bets:      ', kellyMulti100);
  collected.push(kellySingle, kellyMulti10, kellyMulti100);
  console.log();

  // 2. LSTM-Transformer Benchmarks
  sectionHeader('2. HYBRID LSTM-TRANSFORMER');

  const lstmBench = benchmarkLSTMTransformer();

  const lstmSmall = await benchmark('LSTM (seq=10)', lstmBench.lstmSmall);
  const lstmMedium = await benchmark('LSTM (seq=50)', lstmBench.lstmMedium);
  const lstmLarge = await benchmark('LSTM (seq=100)', lstmBench.lstmLarge);
  const attention = await benchmark('Attention (seq=20)', lstmBench.attention);

  metricRow('  LSTM seq=10:   ', lstmSmall);
  metricRow('  LSTM seq=50:   ', lstmMedium);
  metricRow('  LSTM seq=100:  ', lstmLarge);
  metricRow('  Attention:     ', attention);
  collected.push(lstmSmall, lstmMedium, lstmLarge, attention);
  console.log();

  // 3. DRL Benchmarks
  sectionHeader('3. DRL PORTFOLIO MANAGER');

  const drlBench = benchmarkDRL();

  const networkFwd = await benchmark('Network Forward', drlBench.networkForward, 1000);
  const bufferSample = await benchmark('Buffer Sample (64)', drlBench.bufferSample, 1000);
  const bufferPush = await benchmark('Buffer Push', drlBench.bufferPush, 1000);
  const fullStep = await benchmark('Full RL Step', drlBench.fullStep);

  metricRow('  Network fwd:   ', networkFwd);
  metricRow('  Buffer sample: ', bufferSample);
  metricRow('  Buffer push:   ', bufferPush);
  metricRow('  Full RL step:  ', fullStep);
  collected.push(networkFwd, bufferSample, bufferPush, fullStep);
  console.log();

  // 4. Sentiment Benchmarks
  sectionHeader('4. SENTIMENT ALPHA PIPELINE');

  const sentBench = benchmarkSentiment();

  const lexSingle = await benchmark('Lexicon Single', sentBench.lexiconSingle, 1000);
  const lexBatch = await benchmark('Lexicon Batch (4)', sentBench.lexiconBatch);
  const embedSingle = await benchmark('Embedding Single', sentBench.embedSingle, 1000);
  const embedBatch = await benchmark('Embedding Batch (4)', sentBench.embedBatch);
  const fullPipe = await benchmark('Full Pipeline', sentBench.fullPipeline);

  metricRow('  Lexicon:       ', lexSingle);
  metricRow('  Lexicon batch: ', lexBatch);
  metricRow('  Embedding:     ', embedSingle);
  metricRow('  Embed batch:   ', embedBatch);
  metricRow('  Full pipeline: ', fullPipe);
  collected.push(lexSingle, lexBatch, embedSingle, embedBatch, fullPipe);
  console.log();

  // Summary
  console.log(heavyRule);
  console.log('BENCHMARK SUMMARY');
  console.log(heavyRule);
  console.log();

  // Rank every result by mean latency; `mean` is a formatted string, so parse it.
  const ranked = collected.slice().sort((a, b) => parseFloat(a.mean) - parseFloat(b.mean));

  console.log('Fastest Operations:');
  for (const r of ranked.slice(0, 5)) {
    console.log(`  ${r.name.padEnd(25)} ${r.mean}ms (${r.throughput}/s)`);
  }
  console.log();

  console.log('Production Readiness:');
  console.log(lightRule);
  console.log('  Module           │ Latency  │ Throughput │ Status');
  console.log(lightRule);

  // One representative latency/throughput per production module.
  const modules = [
    { name: 'Kelly Engine', latency: kellyMulti10.mean, throughput: kellyMulti10.throughput },
    { name: 'LSTM-Transformer', latency: lstmMedium.mean, throughput: lstmMedium.throughput },
    { name: 'DRL Portfolio', latency: fullStep.mean, throughput: fullStep.throughput },
    { name: 'Sentiment Alpha', latency: fullPipe.mean, throughput: fullPipe.throughput }
  ];

  for (const m of modules) {
    // Status thresholds: <10ms ready, <50ms acceptable, otherwise optimize.
    let status;
    if (parseFloat(m.latency) < 10) {
      status = '✓ Ready';
    } else if (parseFloat(m.latency) < 50) {
      status = '⚠ Acceptable';
    } else {
      status = '✗ Optimize';
    }
    console.log(`  ${m.name.padEnd(24)} │ ${m.latency.padStart(6)}ms │ ${m.throughput.padStart(8)}/s │ ${status}`);
  }
  console.log();

  console.log(heavyRule);
  console.log('Benchmark suite completed');
  console.log(heavyRule);

  return collected;
}
|
||||
|
||||
// Run the suite. Surface failures as a non-zero exit code instead of only
// logging them, so CI / calling scripts can detect a broken benchmark run
// (`.catch(console.error)` alone would leave the process exiting with 0).
runBenchmarks().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
|
||||
Reference in New Issue
Block a user