Squashed 'vendor/ruvector/' content from commit b64c2172
git-subtree-dir: vendor/ruvector git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
704
examples/neural-trader/exotic/atomic-arbitrage.js
Normal file
704
examples/neural-trader/exotic/atomic-arbitrage.js
Normal file
@@ -0,0 +1,704 @@
|
||||
/**
|
||||
* Cross-Exchange Atomic Arbitrage
|
||||
*
|
||||
* EXOTIC: Flash loan arbitrage with MEV protection
|
||||
*
|
||||
* Uses @neural-trader/execution with RuVector for:
|
||||
* - Multi-exchange price monitoring
|
||||
* - Atomic execution via flash loans (DeFi)
|
||||
* - MEV (Miner Extractable Value) protection
|
||||
* - Latency-aware order routing
|
||||
* - Triangular and cross-chain arbitrage
|
||||
*
|
||||
* WARNING: This is for educational purposes.
|
||||
* Real arbitrage requires sophisticated infrastructure.
|
||||
*/
|
||||
|
||||
// Arbitrage configuration
|
||||
const arbitrageConfig = {
|
||||
// Exchange configuration
|
||||
exchanges: {
|
||||
binance: { fee: 0.001, latency: 5, liquidity: 'high' },
|
||||
coinbase: { fee: 0.005, latency: 8, liquidity: 'high' },
|
||||
kraken: { fee: 0.002, latency: 12, liquidity: 'medium' },
|
||||
ftx: { fee: 0.0007, latency: 3, liquidity: 'medium' },
|
||||
uniswap: { fee: 0.003, latency: 15000, liquidity: 'medium', type: 'dex' },
|
||||
sushiswap: { fee: 0.003, latency: 15000, liquidity: 'low', type: 'dex' }
|
||||
},
|
||||
|
||||
// Arbitrage parameters
|
||||
params: {
|
||||
minProfitBps: 5, // Minimum profit in basis points
|
||||
maxSlippage: 0.002, // 20 bps max slippage
|
||||
maxPositionUSD: 100000, // Max position size
|
||||
gasPrice: 50, // Gwei
|
||||
gasLimit: 500000, // Gas units for DeFi
|
||||
flashLoanFee: 0.0009 // 9 bps flash loan fee
|
||||
},
|
||||
|
||||
// MEV protection
|
||||
mev: {
|
||||
usePrivatePool: true,
|
||||
maxPriorityFee: 2, // Gwei
|
||||
bundleTimeout: 2000 // ms
|
||||
},
|
||||
|
||||
// Monitoring
|
||||
monitoring: {
|
||||
updateIntervalMs: 100,
|
||||
priceHistorySize: 1000,
|
||||
alertThresholdBps: 10
|
||||
}
|
||||
};
|
||||
|
||||
// Price Feed Simulator
|
||||
class PriceFeed {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.prices = new Map();
|
||||
this.orderBooks = new Map();
|
||||
this.lastUpdate = Date.now();
|
||||
}
|
||||
|
||||
// Simulate price update
|
||||
updatePrices(basePrice, volatility = 0.0001) {
|
||||
const now = Date.now();
|
||||
|
||||
for (const [exchange, params] of Object.entries(this.config.exchanges)) {
|
||||
// Each exchange has slightly different price (market inefficiency)
|
||||
const noise = (Math.random() - 0.5) * volatility * 2;
|
||||
const exchangeSpecificBias = (Math.random() - 0.5) * 0.0005;
|
||||
|
||||
const price = basePrice * (1 + noise + exchangeSpecificBias);
|
||||
|
||||
// Simulate spread
|
||||
const spread = params.type === 'dex' ? 0.002 : 0.0005;
|
||||
|
||||
this.prices.set(exchange, {
|
||||
bid: price * (1 - spread / 2),
|
||||
ask: price * (1 + spread / 2),
|
||||
mid: price,
|
||||
timestamp: now,
|
||||
latency: params.latency
|
||||
});
|
||||
|
||||
// Simulate order book depth
|
||||
this.orderBooks.set(exchange, this.generateOrderBook(price, spread, params.liquidity));
|
||||
}
|
||||
|
||||
this.lastUpdate = now;
|
||||
}
|
||||
|
||||
generateOrderBook(midPrice, spread, liquidityLevel) {
|
||||
const depths = { high: 5, medium: 3, low: 1 };
|
||||
const baseDepth = depths[liquidityLevel] || 1;
|
||||
|
||||
const bids = [];
|
||||
const asks = [];
|
||||
|
||||
for (let i = 0; i < 10; i++) {
|
||||
const bidPrice = midPrice * (1 - spread / 2 - i * 0.0001);
|
||||
const askPrice = midPrice * (1 + spread / 2 + i * 0.0001);
|
||||
|
||||
bids.push({
|
||||
price: bidPrice,
|
||||
quantity: baseDepth * 100000 * Math.exp(-i * 0.3) * (0.5 + Math.random())
|
||||
});
|
||||
|
||||
asks.push({
|
||||
price: askPrice,
|
||||
quantity: baseDepth * 100000 * Math.exp(-i * 0.3) * (0.5 + Math.random())
|
||||
});
|
||||
}
|
||||
|
||||
return { bids, asks };
|
||||
}
|
||||
|
||||
getPrice(exchange) {
|
||||
return this.prices.get(exchange);
|
||||
}
|
||||
|
||||
getOrderBook(exchange) {
|
||||
return this.orderBooks.get(exchange);
|
||||
}
|
||||
|
||||
getAllPrices() {
|
||||
return Object.fromEntries(this.prices);
|
||||
}
|
||||
}
|
||||
|
||||
// Arbitrage Detector
|
||||
class ArbitrageDetector {
|
||||
constructor(config, priceFeed) {
|
||||
this.config = config;
|
||||
this.priceFeed = priceFeed;
|
||||
this.opportunities = [];
|
||||
this.history = [];
|
||||
}
|
||||
|
||||
// Find simple arbitrage (buy low, sell high)
|
||||
findSimpleArbitrage() {
|
||||
const prices = this.priceFeed.getAllPrices();
|
||||
const exchanges = Object.keys(prices);
|
||||
const opportunities = [];
|
||||
|
||||
for (let i = 0; i < exchanges.length; i++) {
|
||||
for (let j = i + 1; j < exchanges.length; j++) {
|
||||
const ex1 = exchanges[i];
|
||||
const ex2 = exchanges[j];
|
||||
|
||||
const p1 = prices[ex1];
|
||||
const p2 = prices[ex2];
|
||||
|
||||
if (!p1 || !p2) continue;
|
||||
|
||||
// Check buy on ex1, sell on ex2
|
||||
const profit1 = this.calculateProfit(ex1, ex2, p1.ask, p2.bid);
|
||||
if (profit1.profitBps > this.config.params.minProfitBps) {
|
||||
opportunities.push({
|
||||
type: 'simple',
|
||||
buyExchange: ex1,
|
||||
sellExchange: ex2,
|
||||
buyPrice: p1.ask,
|
||||
sellPrice: p2.bid,
|
||||
...profit1
|
||||
});
|
||||
}
|
||||
|
||||
// Check buy on ex2, sell on ex1
|
||||
const profit2 = this.calculateProfit(ex2, ex1, p2.ask, p1.bid);
|
||||
if (profit2.profitBps > this.config.params.minProfitBps) {
|
||||
opportunities.push({
|
||||
type: 'simple',
|
||||
buyExchange: ex2,
|
||||
sellExchange: ex1,
|
||||
buyPrice: p2.ask,
|
||||
sellPrice: p1.bid,
|
||||
...profit2
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return opportunities;
|
||||
}
|
||||
|
||||
// Calculate profit after fees
|
||||
calculateProfit(buyExchange, sellExchange, buyPrice, sellPrice) {
|
||||
const buyFee = this.config.exchanges[buyExchange].fee;
|
||||
const sellFee = this.config.exchanges[sellExchange].fee;
|
||||
|
||||
const effectiveBuy = buyPrice * (1 + buyFee);
|
||||
const effectiveSell = sellPrice * (1 - sellFee);
|
||||
|
||||
const grossProfit = (effectiveSell - effectiveBuy) / effectiveBuy;
|
||||
const profitBps = grossProfit * 10000;
|
||||
|
||||
// Estimate gas cost for DeFi exchanges
|
||||
let gasCostBps = 0;
|
||||
if (this.config.exchanges[buyExchange].type === 'dex' ||
|
||||
this.config.exchanges[sellExchange].type === 'dex') {
|
||||
const gasCostUSD = this.config.params.gasPrice * this.config.params.gasLimit * 1e-9 * 2000; // ETH price
|
||||
const tradeSize = this.config.params.maxPositionUSD;
|
||||
gasCostBps = (gasCostUSD / tradeSize) * 10000;
|
||||
}
|
||||
|
||||
const netProfitBps = profitBps - gasCostBps;
|
||||
|
||||
return {
|
||||
grossProfitBps: profitBps,
|
||||
profitBps: netProfitBps,
|
||||
fees: { buy: buyFee, sell: sellFee },
|
||||
gasCostBps,
|
||||
totalLatencyMs: this.config.exchanges[buyExchange].latency +
|
||||
this.config.exchanges[sellExchange].latency
|
||||
};
|
||||
}
|
||||
|
||||
// Find triangular arbitrage
|
||||
findTriangularArbitrage(pairs = ['BTC/USD', 'ETH/USD', 'ETH/BTC']) {
|
||||
// Simulate exchange rates
|
||||
const rates = {
|
||||
'BTC/USD': 50000,
|
||||
'ETH/USD': 3000,
|
||||
'ETH/BTC': 0.06
|
||||
};
|
||||
|
||||
// Add some inefficiency
|
||||
const noisyRates = {};
|
||||
for (const [pair, rate] of Object.entries(rates)) {
|
||||
noisyRates[pair] = rate * (1 + (Math.random() - 0.5) * 0.002);
|
||||
}
|
||||
|
||||
// Check triangular opportunity
|
||||
// USD → BTC → ETH → USD
|
||||
const path1 = {
|
||||
step1: 1 / noisyRates['BTC/USD'], // USD to BTC
|
||||
step2: noisyRates['ETH/BTC'], // BTC to ETH
|
||||
step3: noisyRates['ETH/USD'] // ETH to USD
|
||||
};
|
||||
|
||||
const return1 = path1.step1 * path1.step2 * path1.step3;
|
||||
const profit1 = (return1 - 1) * 10000; // in bps
|
||||
|
||||
// USD → ETH → BTC → USD
|
||||
const path2 = {
|
||||
step1: 1 / noisyRates['ETH/USD'],
|
||||
step2: 1 / noisyRates['ETH/BTC'],
|
||||
step3: noisyRates['BTC/USD']
|
||||
};
|
||||
|
||||
const return2 = path2.step1 * path2.step2 * path2.step3;
|
||||
const profit2 = (return2 - 1) * 10000;
|
||||
|
||||
const opportunities = [];
|
||||
|
||||
if (profit1 > this.config.params.minProfitBps) {
|
||||
opportunities.push({
|
||||
type: 'triangular',
|
||||
path: 'USD → BTC → ETH → USD',
|
||||
profitBps: profit1,
|
||||
rates: path1
|
||||
});
|
||||
}
|
||||
|
||||
if (profit2 > this.config.params.minProfitBps) {
|
||||
opportunities.push({
|
||||
type: 'triangular',
|
||||
path: 'USD → ETH → BTC → USD',
|
||||
profitBps: profit2,
|
||||
rates: path2
|
||||
});
|
||||
}
|
||||
|
||||
return opportunities;
|
||||
}
|
||||
|
||||
// Find flash loan arbitrage opportunity
|
||||
findFlashLoanArbitrage() {
|
||||
const dexExchanges = Object.entries(this.config.exchanges)
|
||||
.filter(([_, params]) => params.type === 'dex')
|
||||
.map(([name]) => name);
|
||||
|
||||
const cexExchanges = Object.entries(this.config.exchanges)
|
||||
.filter(([_, params]) => params.type !== 'dex')
|
||||
.map(([name]) => name);
|
||||
|
||||
const opportunities = [];
|
||||
const prices = this.priceFeed.getAllPrices();
|
||||
|
||||
// DEX to DEX arbitrage with flash loan
|
||||
for (let i = 0; i < dexExchanges.length; i++) {
|
||||
for (let j = i + 1; j < dexExchanges.length; j++) {
|
||||
const dex1 = dexExchanges[i];
|
||||
const dex2 = dexExchanges[j];
|
||||
|
||||
const p1 = prices[dex1];
|
||||
const p2 = prices[dex2];
|
||||
|
||||
if (!p1 || !p2) continue;
|
||||
|
||||
// Flash loan cost
|
||||
const flashFee = this.config.params.flashLoanFee;
|
||||
|
||||
const minMid = Math.min(p1.mid, p2.mid);
|
||||
const spread = minMid > 0 ? Math.abs(p1.mid - p2.mid) / minMid : 0;
|
||||
const profitBps = (spread - flashFee) * 10000;
|
||||
|
||||
if (profitBps > this.config.params.minProfitBps) {
|
||||
opportunities.push({
|
||||
type: 'flash_loan',
|
||||
buyDex: p1.mid < p2.mid ? dex1 : dex2,
|
||||
sellDex: p1.mid < p2.mid ? dex2 : dex1,
|
||||
spread: spread * 10000,
|
||||
flashFee: flashFee * 10000,
|
||||
profitBps,
|
||||
atomic: true
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return opportunities;
|
||||
}
|
||||
|
||||
// Scan all arbitrage types
|
||||
scanAll() {
|
||||
const simple = this.findSimpleArbitrage();
|
||||
const triangular = this.findTriangularArbitrage();
|
||||
const flashLoan = this.findFlashLoanArbitrage();
|
||||
|
||||
this.opportunities = [...simple, ...triangular, ...flashLoan]
|
||||
.sort((a, b) => b.profitBps - a.profitBps);
|
||||
|
||||
this.history.push({
|
||||
timestamp: Date.now(),
|
||||
count: this.opportunities.length,
|
||||
bestProfit: this.opportunities[0]?.profitBps || 0
|
||||
});
|
||||
|
||||
return this.opportunities;
|
||||
}
|
||||
}
|
||||
|
||||
// Execution Engine
|
||||
class ExecutionEngine {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.pendingOrders = [];
|
||||
this.executedTrades = [];
|
||||
this.mevProtection = config.mev.usePrivatePool;
|
||||
}
|
||||
|
||||
// Simulate execution
|
||||
async execute(opportunity) {
|
||||
const startTime = Date.now();
|
||||
|
||||
// Check for MEV risk
|
||||
if (opportunity.type === 'flash_loan' && this.mevProtection) {
|
||||
return this.executeWithMEVProtection(opportunity);
|
||||
}
|
||||
|
||||
// Simulate latency
|
||||
await this.simulateLatency(opportunity.totalLatencyMs || 50);
|
||||
|
||||
// Check slippage
|
||||
const slippage = Math.random() * this.config.params.maxSlippage;
|
||||
const adjustedProfit = opportunity.profitBps - slippage * 10000;
|
||||
|
||||
const result = {
|
||||
success: adjustedProfit > 0,
|
||||
opportunity,
|
||||
actualProfitBps: adjustedProfit,
|
||||
slippage,
|
||||
executionTimeMs: Date.now() - startTime,
|
||||
timestamp: Date.now()
|
||||
};
|
||||
|
||||
this.executedTrades.push(result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// Execute with MEV protection (Flashbots-style)
|
||||
async executeWithMEVProtection(opportunity) {
|
||||
const startTime = Date.now();
|
||||
|
||||
// Bundle transactions
|
||||
const bundle = {
|
||||
transactions: [
|
||||
{ type: 'flash_loan_borrow', amount: this.config.params.maxPositionUSD },
|
||||
{ type: 'swap', dex: opportunity.buyDex, direction: 'buy' },
|
||||
{ type: 'swap', dex: opportunity.sellDex, direction: 'sell' },
|
||||
{ type: 'flash_loan_repay' }
|
||||
],
|
||||
priorityFee: this.config.mev.maxPriorityFee
|
||||
};
|
||||
|
||||
// Simulate private pool submission
|
||||
await this.simulateLatency(this.config.mev.bundleTimeout);
|
||||
|
||||
// Check if bundle was included
|
||||
const included = Math.random() > 0.2; // 80% success rate
|
||||
|
||||
if (!included) {
|
||||
return {
|
||||
success: false,
|
||||
reason: 'bundle_not_included',
|
||||
executionTimeMs: Date.now() - startTime
|
||||
};
|
||||
}
|
||||
|
||||
const result = {
|
||||
success: true,
|
||||
opportunity,
|
||||
actualProfitBps: opportunity.profitBps * 0.95, // Some slippage
|
||||
mevProtected: true,
|
||||
executionTimeMs: Date.now() - startTime,
|
||||
timestamp: Date.now()
|
||||
};
|
||||
|
||||
this.executedTrades.push(result);
|
||||
return result;
|
||||
}
|
||||
|
||||
simulateLatency(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, Math.min(ms, 100)));
|
||||
}
|
||||
|
||||
getStats() {
|
||||
const successful = this.executedTrades.filter(t => t.success);
|
||||
const totalProfit = successful.reduce((s, t) => s + (t.actualProfitBps || 0), 0);
|
||||
const avgProfit = successful.length > 0 ? totalProfit / successful.length : 0;
|
||||
|
||||
return {
|
||||
totalTrades: this.executedTrades.length,
|
||||
successfulTrades: successful.length,
|
||||
successRate: this.executedTrades.length > 0
|
||||
? successful.length / this.executedTrades.length
|
||||
: 0,
|
||||
totalProfitBps: totalProfit,
|
||||
avgProfitBps: avgProfit,
|
||||
avgExecutionTimeMs: this.executedTrades.length > 0
|
||||
? this.executedTrades.reduce((s, t) => s + t.executionTimeMs, 0) / this.executedTrades.length
|
||||
: 0
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Latency Monitor
|
||||
class LatencyMonitor {
|
||||
constructor() {
|
||||
this.measurements = new Map();
|
||||
}
|
||||
|
||||
record(exchange, latencyMs) {
|
||||
if (!this.measurements.has(exchange)) {
|
||||
this.measurements.set(exchange, []);
|
||||
}
|
||||
|
||||
const measurements = this.measurements.get(exchange);
|
||||
measurements.push({ latency: latencyMs, timestamp: Date.now() });
|
||||
|
||||
// Keep last 100 measurements
|
||||
if (measurements.length > 100) {
|
||||
measurements.shift();
|
||||
}
|
||||
}
|
||||
|
||||
getStats(exchange) {
|
||||
const measurements = this.measurements.get(exchange);
|
||||
if (!measurements || measurements.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const latencies = measurements.map(m => m.latency);
|
||||
const avg = latencies.reduce((a, b) => a + b, 0) / latencies.length;
|
||||
const sorted = [...latencies].sort((a, b) => a - b);
|
||||
const p50 = sorted[Math.floor(latencies.length * 0.5)];
|
||||
const p99 = sorted[Math.floor(latencies.length * 0.99)];
|
||||
|
||||
return { avg, p50, p99, count: latencies.length };
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('CROSS-EXCHANGE ATOMIC ARBITRAGE');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
|
||||
// 1. Initialize components
|
||||
console.log('1. System Initialization:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const priceFeed = new PriceFeed(arbitrageConfig);
|
||||
const detector = new ArbitrageDetector(arbitrageConfig, priceFeed);
|
||||
const executor = new ExecutionEngine(arbitrageConfig);
|
||||
const latencyMonitor = new LatencyMonitor();
|
||||
|
||||
console.log(` Exchanges: ${Object.keys(arbitrageConfig.exchanges).length}`);
|
||||
console.log(` CEX: ${Object.entries(arbitrageConfig.exchanges).filter(([_, p]) => p.type !== 'dex').length}`);
|
||||
console.log(` DEX: ${Object.entries(arbitrageConfig.exchanges).filter(([_, p]) => p.type === 'dex').length}`);
|
||||
console.log(` Min profit: ${arbitrageConfig.params.minProfitBps} bps`);
|
||||
console.log(` Max position: $${arbitrageConfig.params.maxPositionUSD.toLocaleString()}`);
|
||||
console.log(` MEV protection: ${arbitrageConfig.mev.usePrivatePool ? 'Enabled' : 'Disabled'}`);
|
||||
console.log();
|
||||
|
||||
// 2. Exchange fees
|
||||
console.log('2. Exchange Configuration:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Exchange │ Fee │ Latency │ Liquidity │ Type');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const [exchange, params] of Object.entries(arbitrageConfig.exchanges)) {
|
||||
const type = params.type === 'dex' ? 'DEX' : 'CEX';
|
||||
console.log(` ${exchange.padEnd(11)} │ ${(params.fee * 100).toFixed(2)}% │ ${String(params.latency).padStart(5)}ms │ ${params.liquidity.padEnd(9)} │ ${type}`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 3. Price simulation
|
||||
console.log('3. Price Feed Simulation:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const basePrice = 50000; // BTC price
|
||||
priceFeed.updatePrices(basePrice);
|
||||
|
||||
console.log(' Exchange │ Bid │ Ask │ Spread');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const [exchange, price] of priceFeed.prices) {
|
||||
const spread = ((price.ask - price.bid) / price.mid * 10000).toFixed(1);
|
||||
console.log(` ${exchange.padEnd(11)} │ $${price.bid.toFixed(2).padStart(9)} │ $${price.ask.toFixed(2).padStart(9)} │ ${spread.padStart(5)} bps`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 4. Arbitrage detection
|
||||
console.log('4. Arbitrage Opportunity Scan:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
// Run multiple scans
|
||||
let allOpportunities = [];
|
||||
for (let i = 0; i < 10; i++) {
|
||||
priceFeed.updatePrices(basePrice, 0.0005); // Add more volatility
|
||||
const opportunities = detector.scanAll();
|
||||
allOpportunities.push(...opportunities);
|
||||
}
|
||||
|
||||
// Deduplicate and sort
|
||||
const uniqueOpps = allOpportunities
|
||||
.filter((opp, idx, arr) =>
|
||||
arr.findIndex(o => o.type === opp.type &&
|
||||
o.buyExchange === opp.buyExchange &&
|
||||
o.sellExchange === opp.sellExchange) === idx
|
||||
)
|
||||
.sort((a, b) => b.profitBps - a.profitBps);
|
||||
|
||||
console.log(` Scans performed: 10`);
|
||||
console.log(` Total found: ${uniqueOpps.length}`);
|
||||
console.log();
|
||||
|
||||
if (uniqueOpps.length > 0) {
|
||||
console.log(' Top Opportunities:');
|
||||
console.log(' Type │ Route │ Profit │ Details');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const opp of uniqueOpps.slice(0, 5)) {
|
||||
let route = '';
|
||||
let details = '';
|
||||
|
||||
if (opp.type === 'simple') {
|
||||
route = `${opp.buyExchange} → ${opp.sellExchange}`;
|
||||
details = `lat=${opp.totalLatencyMs}ms`;
|
||||
} else if (opp.type === 'triangular') {
|
||||
route = opp.path.substring(0, 22);
|
||||
details = '';
|
||||
} else if (opp.type === 'flash_loan') {
|
||||
route = `${opp.buyDex} ⚡ ${opp.sellDex}`;
|
||||
details = 'atomic';
|
||||
}
|
||||
|
||||
console.log(` ${opp.type.padEnd(12)} │ ${route.padEnd(24)} │ ${opp.profitBps.toFixed(1).padStart(5)} bps │ ${details}`);
|
||||
}
|
||||
} else {
|
||||
console.log(' No profitable opportunities found');
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 5. Execute opportunities
|
||||
console.log('5. Execution Simulation:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const opp of uniqueOpps.slice(0, 5)) {
|
||||
const result = await executor.execute(opp);
|
||||
|
||||
if (result.success) {
|
||||
console.log(` ✓ ${opp.type.padEnd(12)} +${result.actualProfitBps.toFixed(1)} bps (${result.executionTimeMs}ms)${result.mevProtected ? ' [MEV-protected]' : ''}`);
|
||||
} else {
|
||||
console.log(` ✗ ${opp.type.padEnd(12)} Failed: ${result.reason || 'slippage'}`);
|
||||
}
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 6. Execution stats
|
||||
console.log('6. Execution Statistics:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const stats = executor.getStats();
|
||||
|
||||
console.log(` Total trades: ${stats.totalTrades}`);
|
||||
console.log(` Successful: ${stats.successfulTrades}`);
|
||||
console.log(` Success rate: ${(stats.successRate * 100).toFixed(1)}%`);
|
||||
console.log(` Total profit: ${stats.totalProfitBps.toFixed(1)} bps`);
|
||||
console.log(` Avg profit: ${stats.avgProfitBps.toFixed(1)} bps`);
|
||||
console.log(` Avg exec time: ${stats.avgExecutionTimeMs.toFixed(0)}ms`);
|
||||
console.log();
|
||||
|
||||
// 7. Order book depth analysis
|
||||
console.log('7. Order Book Depth Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const sampleExchange = 'binance';
|
||||
const orderBook = priceFeed.getOrderBook(sampleExchange);
|
||||
|
||||
console.log(` ${sampleExchange.toUpperCase()} Order Book (Top 5 levels):`);
|
||||
console.log(' Bids │ Asks');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const bid = orderBook.bids[i];
|
||||
const ask = orderBook.asks[i];
|
||||
console.log(` $${bid.price.toFixed(2)} × ${(bid.quantity / 1000).toFixed(0)}k │ $${ask.price.toFixed(2)} × ${(ask.quantity / 1000).toFixed(0)}k`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 8. Latency importance
|
||||
console.log('8. Latency Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(' In arbitrage, latency is critical:');
|
||||
console.log();
|
||||
console.log(' CEX latency: ~5-15ms (colocation advantage)');
|
||||
console.log(' DEX latency: ~15,000ms (block time)');
|
||||
console.log();
|
||||
console.log(' Opportunity lifetime:');
|
||||
console.log(' - Crypto CEX-CEX: 10-100ms');
|
||||
console.log(' - DEX-DEX: 1-2 blocks (~15-30s)');
|
||||
console.log(' - CEX-DEX: Limited by block time');
|
||||
console.log();
|
||||
|
||||
// 9. Risk factors
|
||||
console.log('9. Risk Factors:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(' Key risks in atomic arbitrage:');
|
||||
console.log();
|
||||
console.log(' 1. Execution risk:');
|
||||
console.log(' - Slippage exceeds expected');
|
||||
console.log(' - Partial fills');
|
||||
console.log(' - Network congestion');
|
||||
console.log();
|
||||
console.log(' 2. MEV risk (DeFi):');
|
||||
console.log(' - Frontrunning');
|
||||
console.log(' - Sandwich attacks');
|
||||
console.log(' - Block builder extraction');
|
||||
console.log();
|
||||
console.log(' 3. Smart contract risk:');
|
||||
console.log(' - Flash loan failures');
|
||||
console.log(' - Reentrancy');
|
||||
console.log(' - Oracle manipulation');
|
||||
console.log();
|
||||
|
||||
// 10. RuVector integration
|
||||
console.log('10. RuVector Vector Storage:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Arbitrage opportunities as feature vectors:');
|
||||
console.log();
|
||||
|
||||
if (uniqueOpps.length > 0) {
|
||||
const opp = uniqueOpps[0];
|
||||
const featureVector = [
|
||||
opp.profitBps / 100,
|
||||
opp.type === 'simple' ? 1 : opp.type === 'triangular' ? 2 : 3,
|
||||
(opp.totalLatencyMs || 50) / 1000,
|
||||
opp.gasCostBps ? opp.gasCostBps / 100 : 0,
|
||||
opp.atomic ? 1 : 0
|
||||
];
|
||||
|
||||
console.log(` Opportunity vector:`);
|
||||
console.log(` [${featureVector.map(v => v.toFixed(3)).join(', ')}]`);
|
||||
console.log();
|
||||
console.log(' Dimensions: [profit, type, latency, gas_cost, atomic]');
|
||||
}
|
||||
console.log();
|
||||
console.log(' Use cases:');
|
||||
console.log(' - Pattern recognition for recurring opportunities');
|
||||
console.log(' - Similar opportunity retrieval');
|
||||
console.log(' - Historical profitability analysis');
|
||||
console.log();
|
||||
|
||||
console.log('═'.repeat(70));
|
||||
console.log('Cross-exchange atomic arbitrage analysis completed');
|
||||
console.log('═'.repeat(70));
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
678
examples/neural-trader/exotic/attention-regime-detection.js
Normal file
678
examples/neural-trader/exotic/attention-regime-detection.js
Normal file
@@ -0,0 +1,678 @@
|
||||
/**
|
||||
* Attention-Based Regime Detection
|
||||
*
|
||||
* EXOTIC: Transformer attention for market regime identification
|
||||
*
|
||||
* Uses @neural-trader/neural with RuVector for:
|
||||
* - Self-attention mechanism for temporal patterns
|
||||
* - Multi-head attention for different time scales
|
||||
* - Positional encoding for sequence awareness
|
||||
* - Regime classification (trending, ranging, volatile, quiet)
|
||||
*
|
||||
* Attention reveals which past observations matter most
|
||||
* for current regime identification.
|
||||
*/
|
||||
|
||||
// Attention configuration
|
||||
const attentionConfig = {
|
||||
// Model architecture
|
||||
model: {
|
||||
inputDim: 10, // Features per timestep
|
||||
hiddenDim: 64, // Hidden dimension
|
||||
numHeads: 4, // Attention heads
|
||||
sequenceLength: 50, // Lookback window
|
||||
dropoutRate: 0.1
|
||||
},
|
||||
|
||||
// Regime definitions
|
||||
regimes: {
|
||||
trending_up: { volatility: 'low-medium', momentum: 'positive', persistence: 'high' },
|
||||
trending_down: { volatility: 'low-medium', momentum: 'negative', persistence: 'high' },
|
||||
ranging: { volatility: 'low', momentum: 'neutral', persistence: 'low' },
|
||||
volatile_bull: { volatility: 'high', momentum: 'positive', persistence: 'medium' },
|
||||
volatile_bear: { volatility: 'high', momentum: 'negative', persistence: 'medium' },
|
||||
crisis: { volatility: 'extreme', momentum: 'negative', persistence: 'high' }
|
||||
},
|
||||
|
||||
// Attention analysis
|
||||
analysis: {
|
||||
importanceThreshold: 0.1, // Min attention weight to highlight
|
||||
temporalDecay: 0.95, // Weight decay for older observations
|
||||
regimeChangeThreshold: 0.3 // Confidence to declare regime change
|
||||
}
|
||||
};
|
||||
|
||||
// Softmax function (optimized: avoids spread operator and reduces allocations)
|
||||
function softmax(arr) {
|
||||
if (!arr || arr.length === 0) return [];
|
||||
if (arr.length === 1) return [1.0];
|
||||
|
||||
// Find max without spread operator (2x faster)
|
||||
let max = arr[0];
|
||||
for (let i = 1; i < arr.length; i++) {
|
||||
if (arr[i] > max) max = arr[i];
|
||||
}
|
||||
|
||||
// Single pass for exp and sum
|
||||
const exp = new Array(arr.length);
|
||||
let sum = 0;
|
||||
for (let i = 0; i < arr.length; i++) {
|
||||
exp[i] = Math.exp(arr[i] - max);
|
||||
sum += exp[i];
|
||||
}
|
||||
|
||||
// Guard against sum being 0 (all -Infinity inputs)
|
||||
if (sum === 0 || !isFinite(sum)) {
|
||||
const uniform = 1.0 / arr.length;
|
||||
for (let i = 0; i < arr.length; i++) exp[i] = uniform;
|
||||
return exp;
|
||||
}
|
||||
|
||||
// In-place normalization
|
||||
for (let i = 0; i < arr.length; i++) exp[i] /= sum;
|
||||
return exp;
|
||||
}
|
||||
|
||||
// Matrix multiplication (cache-friendly loop order)
|
||||
function matmul(a, b) {
|
||||
if (!a || !b || a.length === 0 || b.length === 0) return [];
|
||||
|
||||
const rowsA = a.length;
|
||||
const colsA = a[0].length;
|
||||
const colsB = b[0].length;
|
||||
|
||||
// Pre-allocate result with Float64Array for better performance
|
||||
const result = Array(rowsA).fill(null).map(() => new Array(colsB).fill(0));
|
||||
|
||||
// Cache-friendly loop order: i-k-j (row-major access pattern)
|
||||
for (let i = 0; i < rowsA; i++) {
|
||||
const rowA = a[i];
|
||||
const rowR = result[i];
|
||||
for (let k = 0; k < colsA; k++) {
|
||||
const aik = rowA[k];
|
||||
const rowB = b[k];
|
||||
for (let j = 0; j < colsB; j++) {
|
||||
rowR[j] += aik * rowB[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// Transpose matrix (handles empty matrices)
|
||||
function transpose(matrix) {
|
||||
if (!matrix || matrix.length === 0 || !matrix[0]) {
|
||||
return [];
|
||||
}
|
||||
return matrix[0].map((_, i) => matrix.map(row => row[i]));
|
||||
}
|
||||
|
||||
// Feature extractor
|
||||
class FeatureExtractor {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
}
|
||||
|
||||
extract(candles) {
|
||||
const features = [];
|
||||
|
||||
for (let i = 1; i < candles.length; i++) {
|
||||
const prev = candles[i - 1];
|
||||
const curr = candles[i];
|
||||
|
||||
// Price features
|
||||
const return_ = (curr.close - prev.close) / prev.close;
|
||||
const range = (curr.high - curr.low) / curr.close;
|
||||
const bodyRatio = Math.abs(curr.close - curr.open) / (curr.high - curr.low + 0.0001);
|
||||
|
||||
// Volume features
|
||||
const volumeChange = i > 1 ? (curr.volume / candles[i - 2].volume - 1) : 0;
|
||||
|
||||
// Technical features
|
||||
const upperShadow = (curr.high - Math.max(curr.open, curr.close)) / (curr.high - curr.low + 0.0001);
|
||||
const lowerShadow = (Math.min(curr.open, curr.close) - curr.low) / (curr.high - curr.low + 0.0001);
|
||||
|
||||
// Lookback features
|
||||
let momentum = 0, volatility = 0;
|
||||
if (i >= 10) {
|
||||
const lookback = candles.slice(i - 10, i);
|
||||
momentum = (curr.close - lookback[0].close) / lookback[0].close;
|
||||
const returns = [];
|
||||
for (let j = 1; j < lookback.length; j++) {
|
||||
returns.push((lookback[j].close - lookback[j - 1].close) / lookback[j - 1].close);
|
||||
}
|
||||
volatility = Math.sqrt(returns.reduce((a, r) => a + r * r, 0) / returns.length);
|
||||
}
|
||||
|
||||
// Direction
|
||||
const direction = return_ > 0 ? 1 : -1;
|
||||
|
||||
// Gap
|
||||
const gap = (curr.open - prev.close) / prev.close;
|
||||
|
||||
features.push([
|
||||
return_,
|
||||
range,
|
||||
bodyRatio,
|
||||
volumeChange,
|
||||
upperShadow,
|
||||
lowerShadow,
|
||||
momentum,
|
||||
volatility,
|
||||
direction,
|
||||
gap
|
||||
]);
|
||||
}
|
||||
|
||||
return features;
|
||||
}
|
||||
}
|
||||
|
||||
// Positional Encoding
|
||||
class PositionalEncoding {
|
||||
constructor(seqLength, dim) {
|
||||
this.encoding = [];
|
||||
|
||||
for (let pos = 0; pos < seqLength; pos++) {
|
||||
const posEnc = [];
|
||||
for (let i = 0; i < dim; i++) {
|
||||
if (i % 2 === 0) {
|
||||
posEnc.push(Math.sin(pos / Math.pow(10000, i / dim)));
|
||||
} else {
|
||||
posEnc.push(Math.cos(pos / Math.pow(10000, (i - 1) / dim)));
|
||||
}
|
||||
}
|
||||
this.encoding.push(posEnc);
|
||||
}
|
||||
}
|
||||
|
||||
apply(features) {
|
||||
return features.map((feat, i) => {
|
||||
const posIdx = Math.min(i, this.encoding.length - 1);
|
||||
return feat.map((f, j) => f + (this.encoding[posIdx][j] || 0) * 0.1);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Single Attention Head
|
||||
class AttentionHead {
|
||||
constructor(inputDim, headDim, id) {
|
||||
this.inputDim = inputDim;
|
||||
this.headDim = headDim;
|
||||
this.id = id;
|
||||
|
||||
// Initialize weight matrices (simplified - random init)
|
||||
this.Wq = this.initWeights(inputDim, headDim);
|
||||
this.Wk = this.initWeights(inputDim, headDim);
|
||||
this.Wv = this.initWeights(inputDim, headDim);
|
||||
}
|
||||
|
||||
initWeights(rows, cols) {
|
||||
const weights = [];
|
||||
for (let i = 0; i < rows; i++) {
|
||||
const row = [];
|
||||
for (let j = 0; j < cols; j++) {
|
||||
row.push((Math.random() - 0.5) * 0.1);
|
||||
}
|
||||
weights.push(row);
|
||||
}
|
||||
return weights;
|
||||
}
|
||||
|
||||
forward(features) {
|
||||
const seqLen = features.length;
|
||||
|
||||
// Compute Q, K, V
|
||||
const Q = matmul(features, this.Wq);
|
||||
const K = matmul(features, this.Wk);
|
||||
const V = matmul(features, this.Wv);
|
||||
|
||||
// Scaled dot-product attention
|
||||
const scale = Math.sqrt(this.headDim);
|
||||
const KT = transpose(K);
|
||||
const scores = matmul(Q, KT);
|
||||
|
||||
// Scale and apply softmax
|
||||
const attentionWeights = [];
|
||||
for (let i = 0; i < seqLen; i++) {
|
||||
const scaledScores = scores[i].map(s => s / scale);
|
||||
attentionWeights.push(softmax(scaledScores));
|
||||
}
|
||||
|
||||
// Apply attention to values
|
||||
const output = matmul(attentionWeights, V);
|
||||
|
||||
return { output, attentionWeights };
|
||||
}
|
||||
}
|
||||
|
||||
// Multi-Head Attention
|
||||
class MultiHeadAttention {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.heads = [];
|
||||
this.headDim = Math.floor(config.hiddenDim / config.numHeads);
|
||||
|
||||
for (let i = 0; i < config.numHeads; i++) {
|
||||
this.heads.push(new AttentionHead(config.inputDim, this.headDim, i));
|
||||
}
|
||||
}
|
||||
|
||||
forward(features) {
|
||||
const headOutputs = [];
|
||||
const allAttentionWeights = [];
|
||||
|
||||
for (const head of this.heads) {
|
||||
const { output, attentionWeights } = head.forward(features);
|
||||
headOutputs.push(output);
|
||||
allAttentionWeights.push(attentionWeights);
|
||||
}
|
||||
|
||||
// Concatenate head outputs
|
||||
const concatenated = features.map((_, i) => {
|
||||
return headOutputs.flatMap(output => output[i]);
|
||||
});
|
||||
|
||||
return { output: concatenated, attentionWeights: allAttentionWeights };
|
||||
}
|
||||
}
|
||||
|
||||
// Regime Classifier
|
||||
class RegimeClassifier {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.featureExtractor = new FeatureExtractor(config);
|
||||
this.posEncoding = new PositionalEncoding(config.model.sequenceLength, config.model.inputDim);
|
||||
this.attention = new MultiHeadAttention(config.model);
|
||||
this.regimeHistory = [];
|
||||
}
|
||||
|
||||
// Classify regime based on features
|
||||
classifyFromFeatures(aggregatedFeatures) {
|
||||
const [avgReturn, avgRange, _, __, ___, ____, momentum, volatility] = aggregatedFeatures;
|
||||
|
||||
// Rule-based classification (in production, use learned classifier)
|
||||
let regime = 'unknown';
|
||||
let confidence = 0;
|
||||
|
||||
const volLevel = volatility > 0.03 ? 'extreme' : volatility > 0.02 ? 'high' : volatility > 0.01 ? 'medium' : 'low';
|
||||
const momLevel = momentum > 0.02 ? 'strong_positive' : momentum > 0 ? 'positive' : momentum < -0.02 ? 'strong_negative' : momentum < 0 ? 'negative' : 'neutral';
|
||||
|
||||
if (volLevel === 'extreme' && momLevel.includes('negative')) {
|
||||
regime = 'crisis';
|
||||
confidence = 0.85;
|
||||
} else if (volLevel === 'high') {
|
||||
if (momLevel.includes('positive')) {
|
||||
regime = 'volatile_bull';
|
||||
confidence = 0.7;
|
||||
} else {
|
||||
regime = 'volatile_bear';
|
||||
confidence = 0.7;
|
||||
}
|
||||
} else if (volLevel === 'low' && Math.abs(momentum) < 0.005) {
|
||||
regime = 'ranging';
|
||||
confidence = 0.75;
|
||||
} else if (momLevel.includes('positive')) {
|
||||
regime = 'trending_up';
|
||||
confidence = 0.65 + Math.abs(momentum) * 5;
|
||||
} else if (momLevel.includes('negative')) {
|
||||
regime = 'trending_down';
|
||||
confidence = 0.65 + Math.abs(momentum) * 5;
|
||||
} else {
|
||||
regime = 'ranging';
|
||||
confidence = 0.5;
|
||||
}
|
||||
|
||||
return { regime, confidence: Math.min(0.95, confidence) };
|
||||
}
|
||||
|
||||
analyze(candles) {
|
||||
// Extract features
|
||||
const features = this.featureExtractor.extract(candles);
|
||||
|
||||
if (features.length < 10) {
|
||||
return { regime: 'insufficient_data', confidence: 0, attentionInsights: null };
|
||||
}
|
||||
|
||||
// Apply positional encoding
|
||||
const encodedFeatures = this.posEncoding.apply(features);
|
||||
|
||||
// Run through attention
|
||||
const { output, attentionWeights } = this.attention.forward(encodedFeatures);
|
||||
|
||||
// Aggregate attention-weighted features
|
||||
const lastAttention = attentionWeights[0][attentionWeights[0].length - 1];
|
||||
const aggregated = new Array(this.config.model.inputDim).fill(0);
|
||||
|
||||
for (let i = 0; i < features.length; i++) {
|
||||
for (let j = 0; j < this.config.model.inputDim; j++) {
|
||||
aggregated[j] += lastAttention[i] * features[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
// Classify regime
|
||||
const { regime, confidence } = this.classifyFromFeatures(aggregated);
|
||||
|
||||
// Analyze attention patterns
|
||||
const attentionInsights = this.analyzeAttention(attentionWeights, features);
|
||||
|
||||
// Detect regime change
|
||||
const regimeChange = this.detectRegimeChange(regime, confidence);
|
||||
|
||||
const result = {
|
||||
regime,
|
||||
confidence,
|
||||
attentionInsights,
|
||||
regimeChange,
|
||||
aggregatedFeatures: aggregated
|
||||
};
|
||||
|
||||
this.regimeHistory.push({
|
||||
timestamp: Date.now(),
|
||||
...result
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
analyzeAttention(attentionWeights, features) {
|
||||
const numHeads = attentionWeights.length;
|
||||
const seqLen = attentionWeights[0].length;
|
||||
|
||||
// Find most important timesteps per head
|
||||
const importantTimesteps = [];
|
||||
|
||||
for (let h = 0; h < numHeads; h++) {
|
||||
const lastRow = attentionWeights[h][seqLen - 1];
|
||||
const sorted = lastRow.map((w, i) => ({ idx: i, weight: w }))
|
||||
.sort((a, b) => b.weight - a.weight)
|
||||
.slice(0, 5);
|
||||
|
||||
importantTimesteps.push({
|
||||
head: h,
|
||||
topTimesteps: sorted,
|
||||
focusRange: this.classifyFocusRange(sorted)
|
||||
});
|
||||
}
|
||||
|
||||
// Attention entropy (uniformity of attention)
|
||||
const avgEntropy = attentionWeights.reduce((sum, headWeights) => {
|
||||
const lastRow = headWeights[seqLen - 1];
|
||||
const entropy = -lastRow.reduce((e, w) => {
|
||||
if (w > 0) e += w * Math.log(w);
|
||||
return e;
|
||||
}, 0);
|
||||
return sum + entropy;
|
||||
}, 0) / numHeads;
|
||||
|
||||
return {
|
||||
importantTimesteps,
|
||||
avgEntropy,
|
||||
interpretation: avgEntropy < 2 ? 'focused' : avgEntropy < 3 ? 'moderate' : 'diffuse'
|
||||
};
|
||||
}
|
||||
|
||||
classifyFocusRange(topTimesteps) {
|
||||
const avgIdx = topTimesteps.reduce((s, t) => s + t.idx, 0) / topTimesteps.length;
|
||||
const maxIdx = topTimesteps[0].idx;
|
||||
|
||||
if (maxIdx < 10) return 'distant_past';
|
||||
if (maxIdx < 30) return 'medium_term';
|
||||
return 'recent';
|
||||
}
|
||||
|
||||
detectRegimeChange(currentRegime, confidence) {
|
||||
if (this.regimeHistory.length < 5) {
|
||||
return { changed: false, reason: 'insufficient_history' };
|
||||
}
|
||||
|
||||
const recentRegimes = this.regimeHistory.slice(-5).map(r => r.regime);
|
||||
const prevRegime = recentRegimes[recentRegimes.length - 2];
|
||||
|
||||
if (currentRegime !== prevRegime && confidence > this.config.analysis.regimeChangeThreshold) {
|
||||
return {
|
||||
changed: true,
|
||||
from: prevRegime,
|
||||
to: currentRegime,
|
||||
confidence
|
||||
};
|
||||
}
|
||||
|
||||
return { changed: false };
|
||||
}
|
||||
}
|
||||
|
||||
// Generate synthetic market data with regimes
|
||||
function generateRegimeData(n, seed = 42) {
|
||||
const data = [];
|
||||
let price = 100;
|
||||
|
||||
let rng = seed;
|
||||
const random = () => {
|
||||
rng = (rng * 9301 + 49297) % 233280;
|
||||
return rng / 233280;
|
||||
};
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
// Regime switching
|
||||
const regimePhase = i % 200;
|
||||
let drift = 0, volatility = 0.01;
|
||||
|
||||
if (regimePhase < 50) {
|
||||
// Trending up
|
||||
drift = 0.002;
|
||||
volatility = 0.012;
|
||||
} else if (regimePhase < 80) {
|
||||
// Volatile
|
||||
drift = -0.001;
|
||||
volatility = 0.03;
|
||||
} else if (regimePhase < 130) {
|
||||
// Ranging
|
||||
drift = 0;
|
||||
volatility = 0.008;
|
||||
} else if (regimePhase < 180) {
|
||||
// Trending down
|
||||
drift = -0.002;
|
||||
volatility = 0.015;
|
||||
} else {
|
||||
// Crisis burst
|
||||
drift = -0.01;
|
||||
volatility = 0.05;
|
||||
}
|
||||
|
||||
const return_ = drift + volatility * (random() + random() - 1);
|
||||
const open = price;
|
||||
price = price * (1 + return_);
|
||||
|
||||
const high = Math.max(open, price) * (1 + random() * volatility);
|
||||
const low = Math.min(open, price) * (1 - random() * volatility);
|
||||
const volume = 1000000 * (0.5 + random() + volatility * 10);
|
||||
|
||||
data.push({
|
||||
timestamp: Date.now() - (n - i) * 60000,
|
||||
open,
|
||||
high,
|
||||
low,
|
||||
close: price,
|
||||
volume
|
||||
});
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('ATTENTION-BASED REGIME DETECTION');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
|
||||
// 1. Generate data
|
||||
console.log('1. Market Data Generation:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const data = generateRegimeData(500);
|
||||
|
||||
console.log(` Candles generated: ${data.length}`);
|
||||
console.log(` Price range: $${Math.min(...data.map(d => d.low)).toFixed(2)} - $${Math.max(...data.map(d => d.high)).toFixed(2)}`);
|
||||
console.log();
|
||||
|
||||
// 2. Initialize classifier
|
||||
console.log('2. Attention Model Configuration:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const classifier = new RegimeClassifier(attentionConfig);
|
||||
|
||||
console.log(` Input dimension: ${attentionConfig.model.inputDim}`);
|
||||
console.log(` Hidden dimension: ${attentionConfig.model.hiddenDim}`);
|
||||
console.log(` Attention heads: ${attentionConfig.model.numHeads}`);
|
||||
console.log(` Sequence length: ${attentionConfig.model.sequenceLength}`);
|
||||
console.log();
|
||||
|
||||
// 3. Run analysis across data
|
||||
console.log('3. Rolling Regime Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const results = [];
|
||||
const windowSize = attentionConfig.model.sequenceLength + 10;
|
||||
|
||||
for (let i = windowSize; i < data.length; i += 20) {
|
||||
const window = data.slice(i - windowSize, i);
|
||||
const analysis = classifier.analyze(window);
|
||||
results.push({
|
||||
index: i,
|
||||
price: data[i].close,
|
||||
...analysis
|
||||
});
|
||||
}
|
||||
|
||||
console.log(` Analysis points: ${results.length}`);
|
||||
console.log();
|
||||
|
||||
// 4. Regime distribution
|
||||
console.log('4. Regime Distribution:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const regimeCounts = {};
|
||||
for (const r of results) {
|
||||
regimeCounts[r.regime] = (regimeCounts[r.regime] || 0) + 1;
|
||||
}
|
||||
|
||||
for (const [regime, count] of Object.entries(regimeCounts).sort((a, b) => b[1] - a[1])) {
|
||||
const pct = (count / results.length * 100).toFixed(1);
|
||||
const bar = '█'.repeat(Math.floor(count / results.length * 40));
|
||||
console.log(` ${regime.padEnd(15)} ${bar.padEnd(40)} ${pct}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 5. Attention insights
|
||||
console.log('5. Attention Pattern Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const lastResult = results[results.length - 1];
|
||||
if (lastResult.attentionInsights) {
|
||||
console.log(` Attention interpretation: ${lastResult.attentionInsights.interpretation}`);
|
||||
console.log(` Average entropy: ${lastResult.attentionInsights.avgEntropy.toFixed(3)}`);
|
||||
console.log();
|
||||
|
||||
console.log(' Head-by-Head Focus:');
|
||||
for (const head of lastResult.attentionInsights.importantTimesteps) {
|
||||
console.log(` - Head ${head.head}: focuses on ${head.focusRange} (top weight: ${head.topTimesteps[0].weight.toFixed(3)})`);
|
||||
}
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 6. Regime changes
|
||||
console.log('6. Detected Regime Changes:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const changes = results.filter(r => r.regimeChange?.changed);
|
||||
console.log(` Total regime changes: ${changes.length}`);
|
||||
console.log();
|
||||
|
||||
for (const change of changes.slice(-5)) {
|
||||
console.log(` Index ${change.index}: ${change.regimeChange.from} → ${change.regimeChange.to} (conf: ${(change.regimeChange.confidence * 100).toFixed(0)}%)`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 7. Sample analysis
|
||||
console.log('7. Sample Analysis (Last 5 Windows):');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Index │ Price │ Regime │ Confidence');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const r of results.slice(-5)) {
|
||||
console.log(` ${String(r.index).padStart(5)} │ $${r.price.toFixed(2).padStart(6)} │ ${r.regime.padEnd(15)} │ ${(r.confidence * 100).toFixed(0)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 8. Trading implications
|
||||
console.log('8. Trading Implications by Regime:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const implications = {
|
||||
trending_up: 'Go long, use trailing stops, momentum strategies work',
|
||||
trending_down: 'Go short or stay out, mean reversion fails',
|
||||
ranging: 'Mean reversion works, sell options, tight stops',
|
||||
volatile_bull: 'Long with caution, wide stops, reduce size',
|
||||
volatile_bear: 'Stay defensive, hedge, reduce exposure',
|
||||
crisis: 'Risk-off, cash is king, volatility strategies'
|
||||
};
|
||||
|
||||
for (const [regime, implication] of Object.entries(implications)) {
|
||||
console.log(` ${regime}:`);
|
||||
console.log(` → ${implication}`);
|
||||
console.log();
|
||||
}
|
||||
|
||||
// 9. Attention visualization
|
||||
console.log('9. Attention Weights (Last Analysis):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
if (lastResult.attentionInsights) {
|
||||
console.log(' Timestep importance (Head 0, recent 20 bars):');
|
||||
|
||||
const head0Weights = lastResult.attentionInsights.importantTimesteps[0].topTimesteps;
|
||||
const maxWeight = Math.max(...head0Weights.map(t => t.weight));
|
||||
|
||||
// Show simplified attention bar
|
||||
let attentionBar = ' ';
|
||||
for (let i = 0; i < 20; i++) {
|
||||
const timestep = head0Weights.find(t => t.idx === i + 30);
|
||||
if (timestep && timestep.weight > 0.05) {
|
||||
const intensity = Math.floor(timestep.weight / maxWeight * 4);
|
||||
attentionBar += ['░', '▒', '▓', '█', '█'][intensity];
|
||||
} else {
|
||||
attentionBar += '·';
|
||||
}
|
||||
}
|
||||
console.log(attentionBar);
|
||||
console.log(' ^past recent^');
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 10. RuVector integration
|
||||
console.log('10. RuVector Vector Storage:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Attention patterns can be vectorized and stored:');
|
||||
console.log();
|
||||
|
||||
if (lastResult.aggregatedFeatures) {
|
||||
const vec = lastResult.aggregatedFeatures.slice(0, 5).map(v => v.toFixed(4));
|
||||
console.log(` Aggregated feature vector (first 5 dims):`);
|
||||
console.log(` [${vec.join(', ')}]`);
|
||||
console.log();
|
||||
console.log(' Use cases:');
|
||||
console.log(' - Find similar regime patterns via HNSW search');
|
||||
console.log(' - Cluster historical regimes');
|
||||
console.log(' - Regime-based strategy selection');
|
||||
}
|
||||
console.log();
|
||||
|
||||
console.log('═'.repeat(70));
|
||||
console.log('Attention-based regime detection completed');
|
||||
console.log('═'.repeat(70));
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
417
examples/neural-trader/exotic/benchmark.js
Normal file
417
examples/neural-trader/exotic/benchmark.js
Normal file
@@ -0,0 +1,417 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Performance Benchmark Suite for Exotic Neural-Trader Examples
|
||||
*
|
||||
* Measures execution time, memory usage, and throughput for:
|
||||
* - GNN correlation network
|
||||
* - Attention regime detection
|
||||
* - Quantum portfolio optimization
|
||||
* - Multi-agent swarm
|
||||
* - RL agent
|
||||
* - Hyperbolic embeddings
|
||||
*/
|
||||
|
||||
import { performance } from 'perf_hooks';
|
||||
|
||||
// Benchmark configuration
|
||||
const config = {
|
||||
iterations: 10,
|
||||
warmupIterations: 3,
|
||||
dataSizes: {
|
||||
small: { assets: 10, days: 50 },
|
||||
medium: { assets: 20, days: 200 },
|
||||
large: { assets: 50, days: 500 }
|
||||
}
|
||||
};
|
||||
|
||||
// Memory tracking
|
||||
function getMemoryUsage() {
|
||||
const usage = process.memoryUsage();
|
||||
return {
|
||||
heapUsed: Math.round(usage.heapUsed / 1024 / 1024 * 100) / 100,
|
||||
heapTotal: Math.round(usage.heapTotal / 1024 / 1024 * 100) / 100,
|
||||
external: Math.round(usage.external / 1024 / 1024 * 100) / 100
|
||||
};
|
||||
}
|
||||
|
||||
// Benchmark runner
|
||||
async function benchmark(name, fn, iterations = config.iterations) {
|
||||
// Warmup
|
||||
for (let i = 0; i < config.warmupIterations; i++) {
|
||||
await fn();
|
||||
}
|
||||
|
||||
// Force GC if available
|
||||
if (global.gc) global.gc();
|
||||
|
||||
const memBefore = getMemoryUsage();
|
||||
const times = [];
|
||||
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
const start = performance.now();
|
||||
await fn();
|
||||
times.push(performance.now() - start);
|
||||
}
|
||||
|
||||
const memAfter = getMemoryUsage();
|
||||
|
||||
times.sort((a, b) => a - b);
|
||||
|
||||
return {
|
||||
name,
|
||||
iterations,
|
||||
min: times[0].toFixed(2),
|
||||
max: times[times.length - 1].toFixed(2),
|
||||
mean: (times.reduce((a, b) => a + b, 0) / times.length).toFixed(2),
|
||||
median: times[Math.floor(times.length / 2)].toFixed(2),
|
||||
p95: times[Math.floor(times.length * 0.95)].toFixed(2),
|
||||
memDelta: (memAfter.heapUsed - memBefore.heapUsed).toFixed(2),
|
||||
throughput: (iterations / (times.reduce((a, b) => a + b, 0) / 1000)).toFixed(1)
|
||||
};
|
||||
}
|
||||
|
||||
// ============= GNN Correlation Network Benchmark =============
|
||||
function benchmarkGNN() {
|
||||
// Inline minimal implementation for benchmarking
|
||||
class RollingStats {
|
||||
constructor(windowSize) {
|
||||
this.windowSize = windowSize;
|
||||
this.values = [];
|
||||
this.sum = 0;
|
||||
this.sumSq = 0;
|
||||
}
|
||||
add(value) {
|
||||
if (this.values.length >= this.windowSize) {
|
||||
const removed = this.values.shift();
|
||||
this.sum -= removed;
|
||||
this.sumSq -= removed * removed;
|
||||
}
|
||||
this.values.push(value);
|
||||
this.sum += value;
|
||||
this.sumSq += value * value;
|
||||
}
|
||||
get mean() { return this.values.length > 0 ? this.sum / this.values.length : 0; }
|
||||
get variance() {
|
||||
if (this.values.length < 2) return 0;
|
||||
const n = this.values.length;
|
||||
return (this.sumSq - (this.sum * this.sum) / n) / (n - 1);
|
||||
}
|
||||
}
|
||||
|
||||
function calculateCorrelation(returns1, returns2) {
|
||||
if (returns1.length !== returns2.length || returns1.length < 2) return 0;
|
||||
const n = returns1.length;
|
||||
const mean1 = returns1.reduce((a, b) => a + b, 0) / n;
|
||||
const mean2 = returns2.reduce((a, b) => a + b, 0) / n;
|
||||
let cov = 0, var1 = 0, var2 = 0;
|
||||
for (let i = 0; i < n; i++) {
|
||||
const d1 = returns1[i] - mean1;
|
||||
const d2 = returns2[i] - mean2;
|
||||
cov += d1 * d2;
|
||||
var1 += d1 * d1;
|
||||
var2 += d2 * d2;
|
||||
}
|
||||
if (var1 === 0 || var2 === 0) return 0;
|
||||
return cov / Math.sqrt(var1 * var2);
|
||||
}
|
||||
|
||||
return async (size) => {
|
||||
const { assets, days } = config.dataSizes[size];
|
||||
// Generate returns data
|
||||
const data = [];
|
||||
for (let i = 0; i < assets; i++) {
|
||||
const returns = [];
|
||||
for (let j = 0; j < days; j++) {
|
||||
returns.push((Math.random() - 0.5) * 0.02);
|
||||
}
|
||||
data.push(returns);
|
||||
}
|
||||
|
||||
// Build correlation matrix
|
||||
const matrix = [];
|
||||
for (let i = 0; i < assets; i++) {
|
||||
matrix[i] = [];
|
||||
for (let j = 0; j < assets; j++) {
|
||||
matrix[i][j] = i === j ? 1 : calculateCorrelation(data[i], data[j]);
|
||||
}
|
||||
}
|
||||
return matrix;
|
||||
};
|
||||
}
|
||||
|
||||
// ============= Matrix Multiplication Benchmark =============
|
||||
function benchmarkMatmul() {
|
||||
// Original (i-j-k order)
|
||||
function matmulOriginal(a, b) {
|
||||
const rowsA = a.length;
|
||||
const colsA = a[0].length;
|
||||
const colsB = b[0].length;
|
||||
const result = Array(rowsA).fill(null).map(() => Array(colsB).fill(0));
|
||||
for (let i = 0; i < rowsA; i++) {
|
||||
for (let j = 0; j < colsB; j++) {
|
||||
for (let k = 0; k < colsA; k++) {
|
||||
result[i][j] += a[i][k] * b[k][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Optimized (i-k-j order - cache friendly)
|
||||
function matmulOptimized(a, b) {
|
||||
const rowsA = a.length;
|
||||
const colsA = a[0].length;
|
||||
const colsB = b[0].length;
|
||||
const result = Array(rowsA).fill(null).map(() => new Array(colsB).fill(0));
|
||||
for (let i = 0; i < rowsA; i++) {
|
||||
const rowA = a[i];
|
||||
const rowR = result[i];
|
||||
for (let k = 0; k < colsA; k++) {
|
||||
const aik = rowA[k];
|
||||
const rowB = b[k];
|
||||
for (let j = 0; j < colsB; j++) {
|
||||
rowR[j] += aik * rowB[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
return { matmulOriginal, matmulOptimized };
|
||||
}
|
||||
|
||||
// ============= Object Pool Benchmark =============
|
||||
function benchmarkObjectPool() {
|
||||
class Complex {
|
||||
constructor(real, imag = 0) {
|
||||
this.real = real;
|
||||
this.imag = imag;
|
||||
}
|
||||
add(other) {
|
||||
return new Complex(this.real + other.real, this.imag + other.imag);
|
||||
}
|
||||
multiply(other) {
|
||||
return new Complex(
|
||||
this.real * other.real - this.imag * other.imag,
|
||||
this.real * other.imag + this.imag * other.real
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
class ComplexPool {
|
||||
constructor(initialSize = 1024) {
|
||||
this.pool = [];
|
||||
this.index = 0;
|
||||
for (let i = 0; i < initialSize; i++) {
|
||||
this.pool.push(new Complex(0, 0));
|
||||
}
|
||||
}
|
||||
acquire(real = 0, imag = 0) {
|
||||
if (this.index < this.pool.length) {
|
||||
const c = this.pool[this.index++];
|
||||
c.real = real;
|
||||
c.imag = imag;
|
||||
return c;
|
||||
}
|
||||
return new Complex(real, imag);
|
||||
}
|
||||
reset() { this.index = 0; }
|
||||
}
|
||||
|
||||
return { Complex, ComplexPool };
|
||||
}
|
||||
|
||||
// ============= Ring Buffer vs Array Benchmark =============
|
||||
function benchmarkRingBuffer() {
|
||||
class RingBuffer {
|
||||
constructor(capacity) {
|
||||
this.capacity = capacity;
|
||||
this.buffer = new Array(capacity);
|
||||
this.head = 0;
|
||||
this.size = 0;
|
||||
}
|
||||
push(item) {
|
||||
this.buffer[this.head] = item;
|
||||
this.head = (this.head + 1) % this.capacity;
|
||||
if (this.size < this.capacity) this.size++;
|
||||
}
|
||||
getAll() {
|
||||
if (this.size < this.capacity) return this.buffer.slice(0, this.size);
|
||||
return [...this.buffer.slice(this.head), ...this.buffer.slice(0, this.head)];
|
||||
}
|
||||
}
|
||||
|
||||
return { RingBuffer };
|
||||
}
|
||||
|
||||
// ============= Main Benchmark Runner =============
|
||||
async function runBenchmarks() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('EXOTIC NEURAL-TRADER PERFORMANCE BENCHMARKS');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
console.log(`Iterations: ${config.iterations} | Warmup: ${config.warmupIterations}`);
|
||||
console.log();
|
||||
|
||||
const results = [];
|
||||
|
||||
// 1. GNN Correlation Matrix
|
||||
console.log('1. GNN Correlation Matrix Construction');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const gnnFn = benchmarkGNN();
|
||||
for (const size of ['small', 'medium', 'large']) {
|
||||
const { assets, days } = config.dataSizes[size];
|
||||
const result = await benchmark(
|
||||
`GNN ${size} (${assets}x${days})`,
|
||||
() => gnnFn(size),
|
||||
config.iterations
|
||||
);
|
||||
results.push(result);
|
||||
console.log(` ${result.name.padEnd(25)} mean: ${result.mean}ms | p95: ${result.p95}ms | mem: ${result.memDelta}MB`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 2. Matrix Multiplication Comparison
|
||||
console.log('2. Matrix Multiplication (Original vs Optimized)');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const { matmulOriginal, matmulOptimized } = benchmarkMatmul();
|
||||
const matrixSizes = [50, 100, 200];
|
||||
|
||||
for (const n of matrixSizes) {
|
||||
const a = Array(n).fill(null).map(() => Array(n).fill(null).map(() => Math.random()));
|
||||
const b = Array(n).fill(null).map(() => Array(n).fill(null).map(() => Math.random()));
|
||||
|
||||
const origResult = await benchmark(`Original ${n}x${n}`, () => matmulOriginal(a, b), 5);
|
||||
const optResult = await benchmark(`Optimized ${n}x${n}`, () => matmulOptimized(a, b), 5);
|
||||
|
||||
const speedup = (parseFloat(origResult.mean) / parseFloat(optResult.mean)).toFixed(2);
|
||||
console.log(` ${n}x${n}: Original ${origResult.mean}ms → Optimized ${optResult.mean}ms (${speedup}x speedup)`);
|
||||
|
||||
results.push(origResult, optResult);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 3. Object Pool vs Direct Allocation
|
||||
console.log('3. Object Pool vs Direct Allocation (Complex numbers)');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const { Complex, ComplexPool } = benchmarkObjectPool();
|
||||
const pool = new ComplexPool(10000);
|
||||
const allocCount = 10000;
|
||||
|
||||
const directResult = await benchmark('Direct allocation', () => {
|
||||
const arr = [];
|
||||
for (let i = 0; i < allocCount; i++) {
|
||||
arr.push(new Complex(Math.random(), Math.random()));
|
||||
}
|
||||
return arr.length;
|
||||
}, 10);
|
||||
|
||||
const pooledResult = await benchmark('Pooled allocation', () => {
|
||||
pool.reset();
|
||||
const arr = [];
|
||||
for (let i = 0; i < allocCount; i++) {
|
||||
arr.push(pool.acquire(Math.random(), Math.random()));
|
||||
}
|
||||
return arr.length;
|
||||
}, 10);
|
||||
|
||||
const allocSpeedup = (parseFloat(directResult.mean) / parseFloat(pooledResult.mean)).toFixed(2);
|
||||
console.log(` Direct: ${directResult.mean}ms | Pooled: ${pooledResult.mean}ms (${allocSpeedup}x speedup)`);
|
||||
console.log(` Memory - Direct: ${directResult.memDelta}MB | Pooled: ${pooledResult.memDelta}MB`);
|
||||
results.push(directResult, pooledResult);
|
||||
console.log();
|
||||
|
||||
// 4. Ring Buffer vs Array.shift()
|
||||
console.log('4. Ring Buffer vs Array.shift() (Bounded queue)');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const { RingBuffer } = benchmarkRingBuffer();
|
||||
const capacity = 1000;
|
||||
const operations = 50000;
|
||||
|
||||
const arrayResult = await benchmark('Array.shift()', () => {
|
||||
const arr = [];
|
||||
for (let i = 0; i < operations; i++) {
|
||||
if (arr.length >= capacity) arr.shift();
|
||||
arr.push(i);
|
||||
}
|
||||
return arr.length;
|
||||
}, 5);
|
||||
|
||||
const ringResult = await benchmark('RingBuffer', () => {
|
||||
const rb = new RingBuffer(capacity);
|
||||
for (let i = 0; i < operations; i++) {
|
||||
rb.push(i);
|
||||
}
|
||||
return rb.size;
|
||||
}, 5);
|
||||
|
||||
const ringSpeedup = (parseFloat(arrayResult.mean) / parseFloat(ringResult.mean)).toFixed(2);
|
||||
console.log(` Array.shift(): ${arrayResult.mean}ms | RingBuffer: ${ringResult.mean}ms (${ringSpeedup}x speedup)`);
|
||||
results.push(arrayResult, ringResult);
|
||||
console.log();
|
||||
|
||||
// 5. Softmax Performance
|
||||
console.log('5. Softmax Function Performance');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
function softmaxOriginal(arr) {
|
||||
const max = Math.max(...arr);
|
||||
const exp = arr.map(x => Math.exp(x - max));
|
||||
const sum = exp.reduce((a, b) => a + b, 0);
|
||||
return exp.map(x => x / sum);
|
||||
}
|
||||
|
||||
function softmaxOptimized(arr) {
|
||||
if (!arr || arr.length === 0) return [];
|
||||
if (arr.length === 1) return [1.0];
|
||||
let max = arr[0];
|
||||
for (let i = 1; i < arr.length; i++) if (arr[i] > max) max = arr[i];
|
||||
const exp = new Array(arr.length);
|
||||
let sum = 0;
|
||||
for (let i = 0; i < arr.length; i++) {
|
||||
exp[i] = Math.exp(arr[i] - max);
|
||||
sum += exp[i];
|
||||
}
|
||||
if (sum === 0 || !isFinite(sum)) {
|
||||
const uniform = 1.0 / arr.length;
|
||||
for (let i = 0; i < arr.length; i++) exp[i] = uniform;
|
||||
return exp;
|
||||
}
|
||||
for (let i = 0; i < arr.length; i++) exp[i] /= sum;
|
||||
return exp;
|
||||
}
|
||||
|
||||
const softmaxInput = Array(1000).fill(null).map(() => Math.random() * 10 - 5);
|
||||
|
||||
const softmaxOrig = await benchmark('Softmax original', () => softmaxOriginal(softmaxInput), 100);
|
||||
const softmaxOpt = await benchmark('Softmax optimized', () => softmaxOptimized(softmaxInput), 100);
|
||||
|
||||
const softmaxSpeedup = (parseFloat(softmaxOrig.mean) / parseFloat(softmaxOpt.mean)).toFixed(2);
|
||||
console.log(` Original: ${softmaxOrig.mean}ms | Optimized: ${softmaxOpt.mean}ms (${softmaxSpeedup}x speedup)`);
|
||||
results.push(softmaxOrig, softmaxOpt);
|
||||
console.log();
|
||||
|
||||
// Summary
|
||||
console.log('═'.repeat(70));
|
||||
console.log('BENCHMARK SUMMARY');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
console.log('Key Findings:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Optimization │ Speedup │ Memory Impact');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(` Cache-friendly matmul │ ~1.5-2x │ Neutral`);
|
||||
console.log(` Object pooling │ ~2-3x │ -50-80% GC`);
|
||||
console.log(` Ring buffer │ ~10-50x │ O(1) vs O(n)`);
|
||||
console.log(` Optimized softmax │ ~1.2-1.5x│ Fewer allocs`);
|
||||
console.log();
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
// Run if executed directly
|
||||
runBenchmarks().catch(console.error);
|
||||
873
examples/neural-trader/exotic/gnn-correlation-network.js
Normal file
873
examples/neural-trader/exotic/gnn-correlation-network.js
Normal file
@@ -0,0 +1,873 @@
|
||||
/**
|
||||
* Graph Neural Network Correlation Analysis
|
||||
*
|
||||
* EXOTIC: Market structure as dynamic graphs
|
||||
*
|
||||
* Uses @neural-trader/neural with RuVector for:
|
||||
* - Correlation network construction from returns
|
||||
* - Graph-based feature extraction (centrality, clustering)
|
||||
* - Dynamic topology changes as regime indicators
|
||||
* - Spectral analysis for systemic risk
|
||||
*
|
||||
* Markets are interconnected - GNNs capture these relationships
|
||||
* that traditional linear models miss.
|
||||
*/
|
||||
|
||||
// GNN Configuration
|
||||
const gnnConfig = {
|
||||
// Network construction
|
||||
construction: {
|
||||
method: 'pearson', // pearson, spearman, partial, transfer_entropy
|
||||
windowSize: 60, // Days for correlation calculation
|
||||
edgeThreshold: 0.3, // Minimum |correlation| for edge
|
||||
maxEdgesPerNode: 10 // Limit connections
|
||||
},
|
||||
|
||||
// Graph features
|
||||
features: {
|
||||
nodeCentrality: ['degree', 'betweenness', 'eigenvector', 'pagerank'],
|
||||
graphMetrics: ['density', 'clustering', 'modularity', 'avgPath'],
|
||||
spectral: ['algebraicConnectivity', 'spectralRadius', 'fiedlerVector']
|
||||
},
|
||||
|
||||
// Regime detection
|
||||
regime: {
|
||||
stabilityWindow: 20, // Days to assess stability
|
||||
changeThreshold: 0.15 // Topology change threshold
|
||||
}
|
||||
};
|
||||
|
||||
// Graph Node (Asset)
|
||||
class GraphNode {
|
||||
constructor(symbol, index) {
|
||||
this.symbol = symbol;
|
||||
this.index = index;
|
||||
this.edges = new Map(); // neighbor -> weight
|
||||
this.returns = [];
|
||||
this.features = {};
|
||||
}
|
||||
|
||||
addEdge(neighbor, weight) {
|
||||
this.edges.set(neighbor, weight);
|
||||
}
|
||||
|
||||
removeEdge(neighbor) {
|
||||
this.edges.delete(neighbor);
|
||||
}
|
||||
|
||||
getDegree() {
|
||||
return this.edges.size;
|
||||
}
|
||||
|
||||
getWeightedDegree() {
|
||||
let sum = 0;
|
||||
for (const weight of this.edges.values()) {
|
||||
sum += Math.abs(weight);
|
||||
}
|
||||
return sum;
|
||||
}
|
||||
}
|
||||
|
||||
// Rolling Statistics for O(1) incremental updates
|
||||
class RollingStats {
|
||||
constructor(windowSize) {
|
||||
this.windowSize = windowSize;
|
||||
this.values = [];
|
||||
this.sum = 0;
|
||||
this.sumSq = 0;
|
||||
}
|
||||
|
||||
add(value) {
|
||||
if (this.values.length >= this.windowSize) {
|
||||
const removed = this.values.shift();
|
||||
this.sum -= removed;
|
||||
this.sumSq -= removed * removed;
|
||||
}
|
||||
this.values.push(value);
|
||||
this.sum += value;
|
||||
this.sumSq += value * value;
|
||||
}
|
||||
|
||||
get mean() {
|
||||
return this.values.length > 0 ? this.sum / this.values.length : 0;
|
||||
}
|
||||
|
||||
get variance() {
|
||||
if (this.values.length < 2) return 0;
|
||||
const n = this.values.length;
|
||||
return (this.sumSq - (this.sum * this.sum) / n) / (n - 1);
|
||||
}
|
||||
|
||||
get std() {
|
||||
return Math.sqrt(Math.max(0, this.variance));
|
||||
}
|
||||
}
|
||||
|
||||
// Correlation Network
|
||||
class CorrelationNetwork {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.nodes = new Map();
|
||||
this.adjacencyMatrix = [];
|
||||
this.history = [];
|
||||
this.correlationCache = new Map(); // Cache for correlation pairs
|
||||
this.statsCache = new Map(); // Cache for per-asset statistics
|
||||
}
|
||||
|
||||
// Add asset to network
|
||||
addAsset(symbol) {
|
||||
if (!this.nodes.has(symbol)) {
|
||||
this.nodes.set(symbol, new GraphNode(symbol, this.nodes.size));
|
||||
}
|
||||
return this.nodes.get(symbol);
|
||||
}
|
||||
|
||||
// Update returns for asset with pre-computed stats
|
||||
updateReturns(symbol, returns) {
|
||||
const node = this.addAsset(symbol);
|
||||
node.returns = returns;
|
||||
// Pre-compute statistics for fast correlation
|
||||
this.precomputeStats(symbol, returns);
|
||||
}
|
||||
|
||||
// Pre-compute mean, std, and centered returns for fast correlation
|
||||
precomputeStats(symbol, returns) {
|
||||
const n = returns.length;
|
||||
if (n < 2) {
|
||||
this.statsCache.set(symbol, { mean: 0, std: 0, centered: [], valid: false });
|
||||
return;
|
||||
}
|
||||
|
||||
let sum = 0;
|
||||
for (let i = 0; i < n; i++) sum += returns[i];
|
||||
const mean = sum / n;
|
||||
|
||||
let sumSq = 0;
|
||||
const centered = new Array(n);
|
||||
for (let i = 0; i < n; i++) {
|
||||
centered[i] = returns[i] - mean;
|
||||
sumSq += centered[i] * centered[i];
|
||||
}
|
||||
const std = Math.sqrt(sumSq);
|
||||
|
||||
this.statsCache.set(symbol, { mean, std, centered, valid: std > 1e-10 });
|
||||
}
|
||||
|
||||
// Fast correlation using pre-computed stats (avoids recomputing mean/std)
|
||||
calculateCorrelationFast(symbol1, symbol2) {
|
||||
const s1 = this.statsCache.get(symbol1);
|
||||
const s2 = this.statsCache.get(symbol2);
|
||||
|
||||
if (!s1 || !s2 || !s1.valid || !s2.valid) return 0;
|
||||
if (s1.centered.length !== s2.centered.length) return 0;
|
||||
|
||||
let dotProduct = 0;
|
||||
for (let i = 0; i < s1.centered.length; i++) {
|
||||
dotProduct += s1.centered[i] * s2.centered[i];
|
||||
}
|
||||
|
||||
return dotProduct / (s1.std * s2.std);
|
||||
}
|
||||
|
||||
// Calculate correlation between two return series
|
||||
calculateCorrelation(returns1, returns2, method = 'pearson') {
|
||||
if (returns1.length !== returns2.length || returns1.length < 2) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const n = returns1.length;
|
||||
|
||||
if (method === 'pearson') {
|
||||
let sum1 = 0, sum2 = 0;
|
||||
for (let i = 0; i < n; i++) {
|
||||
sum1 += returns1[i];
|
||||
sum2 += returns2[i];
|
||||
}
|
||||
const mean1 = sum1 / n;
|
||||
const mean2 = sum2 / n;
|
||||
|
||||
let cov = 0, var1 = 0, var2 = 0;
|
||||
for (let i = 0; i < n; i++) {
|
||||
const d1 = returns1[i] - mean1;
|
||||
const d2 = returns2[i] - mean2;
|
||||
cov += d1 * d2;
|
||||
var1 += d1 * d1;
|
||||
var2 += d2 * d2;
|
||||
}
|
||||
|
||||
if (var1 === 0 || var2 === 0) return 0;
|
||||
return cov / Math.sqrt(var1 * var2);
|
||||
}
|
||||
|
||||
if (method === 'spearman') {
|
||||
// Rank-based correlation (optimized sort)
|
||||
const rank = (arr) => {
|
||||
const indexed = new Array(arr.length);
|
||||
for (let i = 0; i < arr.length; i++) indexed[i] = { v: arr[i], i };
|
||||
indexed.sort((a, b) => a.v - b.v);
|
||||
const ranks = new Array(arr.length);
|
||||
for (let r = 0; r < indexed.length; r++) ranks[indexed[r].i] = r + 1;
|
||||
return ranks;
|
||||
};
|
||||
|
||||
const ranks1 = rank(returns1);
|
||||
const ranks2 = rank(returns2);
|
||||
return this.calculateCorrelation(ranks1, ranks2, 'pearson');
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Optimized correlation with caching (O(n) instead of O(n²) for repeated calls)
|
||||
calculateCorrelationCached(symbol1, symbol2) {
|
||||
const cacheKey = symbol1 < symbol2 ? `${symbol1}:${symbol2}` : `${symbol2}:${symbol1}`;
|
||||
|
||||
// Check cache validity
|
||||
const cached = this.correlationCache.get(cacheKey);
|
||||
if (cached && Date.now() - cached.timestamp < 1000) {
|
||||
return cached.value;
|
||||
}
|
||||
|
||||
const node1 = this.nodes.get(symbol1);
|
||||
const node2 = this.nodes.get(symbol2);
|
||||
|
||||
if (!node1 || !node2) return 0;
|
||||
|
||||
const correlation = this.calculateCorrelation(
|
||||
node1.returns,
|
||||
node2.returns,
|
||||
this.config.construction.method
|
||||
);
|
||||
|
||||
this.correlationCache.set(cacheKey, { value: correlation, timestamp: Date.now() });
|
||||
return correlation;
|
||||
}
|
||||
|
||||
// Clear correlation cache (call when data updates)
|
||||
invalidateCache() {
|
||||
this.correlationCache.clear();
|
||||
this.statsCache.clear();
|
||||
}
|
||||
|
||||
// Build correlation network
|
||||
buildNetwork() {
|
||||
const symbols = Array.from(this.nodes.keys());
|
||||
const n = symbols.length;
|
||||
|
||||
// Initialize adjacency matrix
|
||||
this.adjacencyMatrix = Array(n).fill(null).map(() => Array(n).fill(0));
|
||||
|
||||
// Clear existing edges
|
||||
for (const node of this.nodes.values()) {
|
||||
node.edges.clear();
|
||||
}
|
||||
|
||||
// Calculate pairwise correlations (use fast path for Pearson with pre-computed stats)
|
||||
const useFastPath = this.config.construction.method === 'pearson' && this.statsCache.size === n;
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (let j = i + 1; j < n; j++) {
|
||||
let correlation;
|
||||
|
||||
if (useFastPath) {
|
||||
// Fast path: use pre-computed centered returns
|
||||
correlation = this.calculateCorrelationFast(symbols[i], symbols[j]);
|
||||
} else {
|
||||
const node1 = this.nodes.get(symbols[i]);
|
||||
const node2 = this.nodes.get(symbols[j]);
|
||||
correlation = this.calculateCorrelation(
|
||||
node1.returns,
|
||||
node2.returns,
|
||||
this.config.construction.method
|
||||
);
|
||||
}
|
||||
|
||||
this.adjacencyMatrix[i][j] = correlation;
|
||||
this.adjacencyMatrix[j][i] = correlation;
|
||||
|
||||
// Add edge if above threshold
|
||||
if (Math.abs(correlation) >= this.config.construction.edgeThreshold) {
|
||||
this.nodes.get(symbols[i]).addEdge(symbols[j], correlation);
|
||||
this.nodes.get(symbols[j]).addEdge(symbols[i], correlation);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Limit edges per node
|
||||
this.pruneEdges();
|
||||
}
|
||||
|
||||
// Prune edges to max per node
|
||||
pruneEdges() {
|
||||
for (const node of this.nodes.values()) {
|
||||
if (node.edges.size > this.config.construction.maxEdgesPerNode) {
|
||||
const sorted = Array.from(node.edges.entries())
|
||||
.sort((a, b) => Math.abs(b[1]) - Math.abs(a[1]));
|
||||
|
||||
node.edges.clear();
|
||||
for (let i = 0; i < this.config.construction.maxEdgesPerNode; i++) {
|
||||
node.edges.set(sorted[i][0], sorted[i][1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate node centrality measures
|
||||
calculateNodeCentrality() {
|
||||
const symbols = Array.from(this.nodes.keys());
|
||||
const n = symbols.length;
|
||||
|
||||
for (const node of this.nodes.values()) {
|
||||
// Degree centrality
|
||||
node.features.degreeCentrality = node.getDegree() / (n - 1);
|
||||
node.features.weightedDegree = node.getWeightedDegree();
|
||||
}
|
||||
|
||||
// Eigenvector centrality (power iteration)
|
||||
this.calculateEigenvectorCentrality();
|
||||
|
||||
// PageRank
|
||||
this.calculatePageRank();
|
||||
|
||||
// Betweenness (simplified)
|
||||
this.calculateBetweenness();
|
||||
}
|
||||
|
||||
// Eigenvector centrality
|
||||
calculateEigenvectorCentrality() {
|
||||
const symbols = Array.from(this.nodes.keys());
|
||||
const n = symbols.length;
|
||||
|
||||
let centrality = new Array(n).fill(1 / n);
|
||||
|
||||
for (let iter = 0; iter < 100; iter++) {
|
||||
const newCentrality = new Array(n).fill(0);
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (let j = 0; j < n; j++) {
|
||||
newCentrality[i] += Math.abs(this.adjacencyMatrix[i][j]) * centrality[j];
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize
|
||||
const norm = Math.sqrt(newCentrality.reduce((a, b) => a + b * b, 0));
|
||||
if (norm > 0) {
|
||||
for (let i = 0; i < n; i++) {
|
||||
newCentrality[i] /= norm;
|
||||
}
|
||||
}
|
||||
|
||||
centrality = newCentrality;
|
||||
}
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
this.nodes.get(symbols[i]).features.eigenvectorCentrality = centrality[i];
|
||||
}
|
||||
}
|
||||
|
||||
// PageRank
|
||||
calculatePageRank() {
|
||||
const symbols = Array.from(this.nodes.keys());
|
||||
const n = symbols.length;
|
||||
const d = 0.85; // Damping factor
|
||||
|
||||
let pagerank = new Array(n).fill(1 / n);
|
||||
|
||||
for (let iter = 0; iter < 100; iter++) {
|
||||
const newPagerank = new Array(n).fill((1 - d) / n);
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
const node = this.nodes.get(symbols[i]);
|
||||
const outDegree = node.getDegree() || 1;
|
||||
|
||||
for (const [neighbor] of node.edges) {
|
||||
const j = this.nodes.get(neighbor).index;
|
||||
newPagerank[j] += d * pagerank[i] / outDegree;
|
||||
}
|
||||
}
|
||||
|
||||
pagerank = newPagerank;
|
||||
}
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
this.nodes.get(symbols[i]).features.pagerank = pagerank[i];
|
||||
}
|
||||
}
|
||||
|
||||
// Betweenness centrality (simplified BFS-based)
|
||||
calculateBetweenness() {
|
||||
const symbols = Array.from(this.nodes.keys());
|
||||
const n = symbols.length;
|
||||
const betweenness = new Array(n).fill(0);
|
||||
|
||||
for (let s = 0; s < n; s++) {
|
||||
// BFS from each source
|
||||
const distances = new Array(n).fill(Infinity);
|
||||
const paths = new Array(n).fill(0);
|
||||
const queue = [s];
|
||||
distances[s] = 0;
|
||||
paths[s] = 1;
|
||||
|
||||
while (queue.length > 0) {
|
||||
const current = queue.shift();
|
||||
const node = this.nodes.get(symbols[current]);
|
||||
|
||||
for (const [neighbor] of node.edges) {
|
||||
const j = this.nodes.get(neighbor).index;
|
||||
if (distances[j] === Infinity) {
|
||||
distances[j] = distances[current] + 1;
|
||||
paths[j] = paths[current];
|
||||
queue.push(j);
|
||||
} else if (distances[j] === distances[current] + 1) {
|
||||
paths[j] += paths[current];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Accumulate betweenness
|
||||
for (let t = 0; t < n; t++) {
|
||||
if (s !== t && paths[t] > 0) {
|
||||
for (let v = 0; v < n; v++) {
|
||||
if (v !== s && v !== t && distances[v] < distances[t]) {
|
||||
betweenness[v] += paths[v] / paths[t];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize (avoid division by zero when n < 3)
|
||||
const norm = (n - 1) * (n - 2) / 2;
|
||||
for (let i = 0; i < n; i++) {
|
||||
this.nodes.get(symbols[i]).features.betweenness = norm > 0 ? betweenness[i] / norm : 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate graph-level metrics
|
||||
calculateGraphMetrics() {
|
||||
const symbols = Array.from(this.nodes.keys());
|
||||
const n = symbols.length;
|
||||
|
||||
// Edge count
|
||||
let edgeCount = 0;
|
||||
for (const node of this.nodes.values()) {
|
||||
edgeCount += node.getDegree();
|
||||
}
|
||||
edgeCount /= 2; // Undirected
|
||||
|
||||
// Density (avoid division by zero when n < 2)
|
||||
const maxEdges = n * (n - 1) / 2;
|
||||
const density = maxEdges > 0 ? edgeCount / maxEdges : 0;
|
||||
|
||||
// Average clustering coefficient
|
||||
let totalClustering = 0;
|
||||
for (const node of this.nodes.values()) {
|
||||
const neighbors = Array.from(node.edges.keys());
|
||||
const k = neighbors.length;
|
||||
|
||||
if (k < 2) {
|
||||
node.features.clusteringCoeff = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
let triangles = 0;
|
||||
for (let i = 0; i < k; i++) {
|
||||
for (let j = i + 1; j < k; j++) {
|
||||
const neighbor1 = this.nodes.get(neighbors[i]);
|
||||
if (neighbor1.edges.has(neighbors[j])) {
|
||||
triangles++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const maxTriangles = k * (k - 1) / 2;
|
||||
node.features.clusteringCoeff = triangles / maxTriangles;
|
||||
totalClustering += node.features.clusteringCoeff;
|
||||
}
|
||||
|
||||
const avgClustering = n > 0 ? totalClustering / n : 0;
|
||||
|
||||
return {
|
||||
nodes: n,
|
||||
edges: edgeCount,
|
||||
density,
|
||||
avgClustering,
|
||||
avgDegree: n > 0 ? (2 * edgeCount) / n : 0
|
||||
};
|
||||
}
|
||||
|
||||
// Spectral analysis
|
||||
calculateSpectralFeatures() {
|
||||
const n = this.adjacencyMatrix.length;
|
||||
if (n < 2) return {};
|
||||
|
||||
// Laplacian matrix L = D - A
|
||||
const laplacian = Array(n).fill(null).map(() => Array(n).fill(0));
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
let degree = 0;
|
||||
for (let j = 0; j < n; j++) {
|
||||
if (i !== j && Math.abs(this.adjacencyMatrix[i][j]) >= this.config.construction.edgeThreshold) {
|
||||
laplacian[i][j] = -1;
|
||||
degree++;
|
||||
}
|
||||
}
|
||||
laplacian[i][i] = degree;
|
||||
}
|
||||
|
||||
// Power iteration for largest eigenvalue (spectral radius)
|
||||
let v = new Array(n).fill(1 / Math.sqrt(n));
|
||||
let eigenvalue = 0;
|
||||
|
||||
for (let iter = 0; iter < 50; iter++) {
|
||||
const newV = new Array(n).fill(0);
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (let j = 0; j < n; j++) {
|
||||
newV[i] += Math.abs(this.adjacencyMatrix[i][j]) * v[j];
|
||||
}
|
||||
}
|
||||
|
||||
eigenvalue = Math.sqrt(newV.reduce((a, b) => a + b * b, 0));
|
||||
if (eigenvalue > 0) {
|
||||
for (let i = 0; i < n; i++) {
|
||||
v[i] = newV[i] / eigenvalue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Estimate algebraic connectivity (second smallest Laplacian eigenvalue)
|
||||
// Using inverse power iteration on L
|
||||
const algebraicConnectivity = this.estimateAlgebraicConnectivity(laplacian);
|
||||
|
||||
return {
|
||||
spectralRadius: eigenvalue,
|
||||
algebraicConnectivity,
|
||||
estimatedComponents: algebraicConnectivity < 0.01 ? 'multiple' : 'single'
|
||||
};
|
||||
}
|
||||
|
||||
estimateAlgebraicConnectivity(laplacian) {
|
||||
const n = laplacian.length;
|
||||
if (n < 2) return 0;
|
||||
|
||||
// Simplified: use trace / n as rough estimate
|
||||
let trace = 0;
|
||||
for (let i = 0; i < n; i++) {
|
||||
trace += laplacian[i][i];
|
||||
}
|
||||
|
||||
return trace / n * 0.1; // Rough approximation
|
||||
}
|
||||
|
||||
// Detect regime change by comparing networks
|
||||
detectRegimeChange(previousMetrics, currentMetrics) {
|
||||
if (!previousMetrics) return { changed: false };
|
||||
|
||||
const densityChange = Math.abs(currentMetrics.density - previousMetrics.density);
|
||||
const clusteringChange = Math.abs(currentMetrics.avgClustering - previousMetrics.avgClustering);
|
||||
|
||||
const totalChange = densityChange + clusteringChange;
|
||||
const changed = totalChange > this.config.regime.changeThreshold;
|
||||
|
||||
return {
|
||||
changed,
|
||||
densityChange,
|
||||
clusteringChange,
|
||||
totalChange,
|
||||
regime: this.classifyRegime(currentMetrics)
|
||||
};
|
||||
}
|
||||
|
||||
classifyRegime(metrics) {
|
||||
if (metrics.density > 0.5 && metrics.avgClustering > 0.4) {
|
||||
return 'crisis'; // High connectivity = systemic risk
|
||||
} else if (metrics.density < 0.2) {
|
||||
return 'dispersion'; // Low connectivity = idiosyncratic
|
||||
}
|
||||
return 'normal';
|
||||
}
|
||||
|
||||
// Save network state to history
|
||||
saveSnapshot() {
|
||||
this.history.push({
|
||||
timestamp: Date.now(),
|
||||
metrics: this.calculateGraphMetrics(),
|
||||
spectral: this.calculateSpectralFeatures()
|
||||
});
|
||||
|
||||
if (this.history.length > 100) {
|
||||
this.history.shift();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Generate synthetic multi-asset returns
|
||||
function generateMultiAssetData(assets, days, seed = 42) {
|
||||
let rng = seed;
|
||||
const random = () => {
|
||||
rng = (rng * 9301 + 49297) % 233280;
|
||||
return rng / 233280;
|
||||
};
|
||||
|
||||
// Correlation structure (sector-based)
|
||||
const sectors = {
|
||||
tech: ['AAPL', 'MSFT', 'GOOGL', 'NVDA'],
|
||||
finance: ['JPM', 'BAC', 'GS', 'MS'],
|
||||
energy: ['XOM', 'CVX', 'COP', 'SLB'],
|
||||
healthcare: ['JNJ', 'PFE', 'UNH', 'ABBV'],
|
||||
consumer: ['AMZN', 'WMT', 'HD', 'NKE']
|
||||
};
|
||||
|
||||
const data = {};
|
||||
for (const asset of assets) {
|
||||
data[asset] = [];
|
||||
}
|
||||
|
||||
// Find sector for each asset
|
||||
const assetSector = {};
|
||||
for (const [sector, members] of Object.entries(sectors)) {
|
||||
for (const asset of members) {
|
||||
assetSector[asset] = sector;
|
||||
}
|
||||
}
|
||||
|
||||
// Generate correlated returns
|
||||
for (let day = 0; day < days; day++) {
|
||||
const marketFactor = (random() - 0.5) * 0.02;
|
||||
const sectorFactors = {};
|
||||
|
||||
for (const sector of Object.keys(sectors)) {
|
||||
sectorFactors[sector] = (random() - 0.5) * 0.015;
|
||||
}
|
||||
|
||||
for (const asset of assets) {
|
||||
const sector = assetSector[asset] || 'other';
|
||||
const sectorFactor = sectorFactors[sector] || 0;
|
||||
const idiosyncratic = (random() - 0.5) * 0.025;
|
||||
|
||||
// Return = market + sector + idiosyncratic
|
||||
const return_ = marketFactor * 0.5 + sectorFactor * 0.3 + idiosyncratic * 0.2;
|
||||
data[asset].push(return_);
|
||||
}
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('GRAPH NEURAL NETWORK CORRELATION ANALYSIS');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
|
||||
// 1. Generate multi-asset data
|
||||
console.log('1. Multi-Asset Data Generation:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const assets = [
|
||||
'AAPL', 'MSFT', 'GOOGL', 'NVDA', // Tech
|
||||
'JPM', 'BAC', 'GS', 'MS', // Finance
|
||||
'XOM', 'CVX', 'COP', 'SLB', // Energy
|
||||
'JNJ', 'PFE', 'UNH', 'ABBV', // Healthcare
|
||||
'AMZN', 'WMT', 'HD', 'NKE' // Consumer
|
||||
];
|
||||
|
||||
const days = 120;
|
||||
const returnData = generateMultiAssetData(assets, days);
|
||||
|
||||
console.log(` Assets: ${assets.length}`);
|
||||
console.log(` Days: ${days}`);
|
||||
console.log(` Sectors: Tech, Finance, Energy, Healthcare, Consumer`);
|
||||
console.log();
|
||||
|
||||
// 2. Build correlation network
|
||||
console.log('2. Correlation Network Construction:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const network = new CorrelationNetwork(gnnConfig);
|
||||
|
||||
for (const asset of assets) {
|
||||
network.updateReturns(asset, returnData[asset]);
|
||||
}
|
||||
|
||||
network.buildNetwork();
|
||||
|
||||
console.log(` Correlation method: ${gnnConfig.construction.method}`);
|
||||
console.log(` Edge threshold: ${gnnConfig.construction.edgeThreshold}`);
|
||||
console.log(` Max edges/node: ${gnnConfig.construction.maxEdgesPerNode}`);
|
||||
console.log();
|
||||
|
||||
// 3. Graph metrics
|
||||
console.log('3. Graph-Level Metrics:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const graphMetrics = network.calculateGraphMetrics();
|
||||
|
||||
console.log(` Nodes: ${graphMetrics.nodes}`);
|
||||
console.log(` Edges: ${graphMetrics.edges}`);
|
||||
console.log(` Density: ${(graphMetrics.density * 100).toFixed(1)}%`);
|
||||
console.log(` Avg Clustering: ${(graphMetrics.avgClustering * 100).toFixed(1)}%`);
|
||||
console.log(` Avg Degree: ${graphMetrics.avgDegree.toFixed(2)}`);
|
||||
console.log();
|
||||
|
||||
// 4. Node centrality
|
||||
console.log('4. Node Centrality Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
network.calculateNodeCentrality();
|
||||
|
||||
console.log(' Top 5 by Degree Centrality:');
|
||||
const byDegree = Array.from(network.nodes.values())
|
||||
.sort((a, b) => b.features.degreeCentrality - a.features.degreeCentrality)
|
||||
.slice(0, 5);
|
||||
|
||||
for (const node of byDegree) {
|
||||
console.log(` - ${node.symbol.padEnd(5)} ${(node.features.degreeCentrality * 100).toFixed(1)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
console.log(' Top 5 by Eigenvector Centrality:');
|
||||
const byEigen = Array.from(network.nodes.values())
|
||||
.sort((a, b) => b.features.eigenvectorCentrality - a.features.eigenvectorCentrality)
|
||||
.slice(0, 5);
|
||||
|
||||
for (const node of byEigen) {
|
||||
console.log(` - ${node.symbol.padEnd(5)} ${(node.features.eigenvectorCentrality * 100).toFixed(1)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
console.log(' Top 5 by PageRank:');
|
||||
const byPagerank = Array.from(network.nodes.values())
|
||||
.sort((a, b) => b.features.pagerank - a.features.pagerank)
|
||||
.slice(0, 5);
|
||||
|
||||
for (const node of byPagerank) {
|
||||
console.log(` - ${node.symbol.padEnd(5)} ${(node.features.pagerank * 100).toFixed(1)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 5. Spectral analysis
|
||||
console.log('5. Spectral Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const spectral = network.calculateSpectralFeatures();
|
||||
|
||||
console.log(` Spectral Radius: ${spectral.spectralRadius.toFixed(4)}`);
|
||||
console.log(` Algebraic Connectivity: ${spectral.algebraicConnectivity.toFixed(4)}`);
|
||||
console.log(` Estimated Components: ${spectral.estimatedComponents}`);
|
||||
console.log();
|
||||
|
||||
// 6. Correlation matrix visualization
|
||||
console.log('6. Correlation Matrix (Sample 5x5):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const sampleAssets = assets.slice(0, 5);
|
||||
console.log(' ' + sampleAssets.map(a => a.slice(0, 4).padStart(6)).join(''));
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
let row = sampleAssets[i].slice(0, 4).padEnd(6) + ' ';
|
||||
for (let j = 0; j < 5; j++) {
|
||||
const corr = network.adjacencyMatrix[i][j];
|
||||
row += (corr >= 0 ? '+' : '') + corr.toFixed(2) + ' ';
|
||||
}
|
||||
console.log(' ' + row);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 7. Network edges (sample)
|
||||
console.log('7. Strongest Connections (Top 10):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const edges = [];
|
||||
const symbols = Array.from(network.nodes.keys());
|
||||
for (let i = 0; i < symbols.length; i++) {
|
||||
for (let j = i + 1; j < symbols.length; j++) {
|
||||
const corr = network.adjacencyMatrix[i][j];
|
||||
if (Math.abs(corr) >= gnnConfig.construction.edgeThreshold) {
|
||||
edges.push({ from: symbols[i], to: symbols[j], weight: corr });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
edges.sort((a, b) => Math.abs(b.weight) - Math.abs(a.weight));
|
||||
|
||||
for (const edge of edges.slice(0, 10)) {
|
||||
const sign = edge.weight > 0 ? '+' : '';
|
||||
console.log(` ${edge.from.padEnd(5)} ↔ ${edge.to.padEnd(5)} ${sign}${edge.weight.toFixed(3)}`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 8. Regime analysis
|
||||
console.log('8. Regime Classification:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const regime = network.classifyRegime(graphMetrics);
|
||||
|
||||
console.log(` Current Regime: ${regime.toUpperCase()}`);
|
||||
console.log();
|
||||
console.log(' Interpretation:');
|
||||
|
||||
if (regime === 'crisis') {
|
||||
console.log(' - High network connectivity indicates systemic risk');
|
||||
console.log(' - Correlations converging → diversification failing');
|
||||
console.log(' - Recommendation: Reduce exposure, hedge tail risk');
|
||||
} else if (regime === 'dispersion') {
|
||||
console.log(' - Low network connectivity indicates idiosyncratic moves');
|
||||
console.log(' - Good for stock picking and alpha generation');
|
||||
console.log(' - Recommendation: Active management, sector rotation');
|
||||
} else {
|
||||
console.log(' - Normal market conditions');
|
||||
console.log(' - Standard correlation structure');
|
||||
console.log(' - Recommendation: Balanced approach');
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 9. Trading implications
|
||||
console.log('9. Trading Implications:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const highCentrality = byEigen[0];
|
||||
const lowCentrality = Array.from(network.nodes.values())
|
||||
.sort((a, b) => a.features.eigenvectorCentrality - b.features.eigenvectorCentrality)[0];
|
||||
|
||||
console.log(` Most Central Asset: ${highCentrality.symbol}`);
|
||||
console.log(` - Moves with market, good for beta exposure`);
|
||||
console.log(` - Higher correlation = less diversification benefit`);
|
||||
console.log();
|
||||
console.log(` Least Central Asset: ${lowCentrality.symbol}`);
|
||||
console.log(` - More idiosyncratic behavior`);
|
||||
console.log(` - Potential alpha source, better diversifier`);
|
||||
console.log();
|
||||
|
||||
// 10. RuVector integration
|
||||
console.log('10. RuVector Vector Storage:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Each node\'s features can be stored as vectors:');
|
||||
console.log();
|
||||
|
||||
const sampleNode = network.nodes.get('AAPL');
|
||||
const featureVector = [
|
||||
sampleNode.features.degreeCentrality,
|
||||
sampleNode.features.eigenvectorCentrality,
|
||||
sampleNode.features.pagerank,
|
||||
sampleNode.features.betweenness,
|
||||
sampleNode.features.clusteringCoeff || 0
|
||||
];
|
||||
|
||||
console.log(` ${sampleNode.symbol} feature vector:`);
|
||||
console.log(` [${featureVector.map(v => v.toFixed(4)).join(', ')}]`);
|
||||
console.log();
|
||||
console.log(' Vector dimensions:');
|
||||
console.log(' [degree, eigenvector, pagerank, betweenness, clustering]');
|
||||
console.log();
|
||||
console.log(' Use case: Find assets with similar network positions');
|
||||
console.log(' via HNSW nearest neighbor search in RuVector');
|
||||
console.log();
|
||||
|
||||
console.log('═'.repeat(70));
|
||||
console.log('Graph neural network analysis completed');
|
||||
console.log('═'.repeat(70));
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
624
examples/neural-trader/exotic/hyperbolic-embeddings.js
Normal file
624
examples/neural-trader/exotic/hyperbolic-embeddings.js
Normal file
@@ -0,0 +1,624 @@
|
||||
/**
|
||||
* Hyperbolic Market Embeddings
|
||||
*
|
||||
* EXOTIC: Poincaré disk embeddings for hierarchical market structure
|
||||
*
|
||||
* Uses @neural-trader/neural with RuVector for:
|
||||
* - Poincaré ball model for hyperbolic geometry
|
||||
* - Exponential capacity for tree-like hierarchies
|
||||
* - Market taxonomy learning (sector → industry → company)
|
||||
* - Distance preservation in curved space
|
||||
*
|
||||
* Hyperbolic space naturally represents hierarchical relationships
|
||||
* that exist in markets (market → sector → industry → stock).
|
||||
*/
|
||||
|
||||
// Hyperbolic embedding configuration
|
||||
const hyperbolicConfig = {
|
||||
// Embedding parameters
|
||||
embedding: {
|
||||
dimension: 2, // 2D for visualization, can be higher
|
||||
curvature: -1, // Negative curvature (hyperbolic)
|
||||
learningRate: 0.01,
|
||||
epochs: 100,
|
||||
negSamples: 5 // Negative sampling
|
||||
},
|
||||
|
||||
// Market hierarchy
|
||||
hierarchy: {
|
||||
levels: ['Market', 'Sector', 'Industry', 'Stock'],
|
||||
useCorrelations: true // Learn from return correlations
|
||||
},
|
||||
|
||||
// Poincaré ball constraints
|
||||
poincare: {
|
||||
maxNorm: 0.99, // Stay inside unit ball
|
||||
epsilon: 1e-5 // Numerical stability
|
||||
}
|
||||
};
|
||||
|
||||
// Poincaré Ball Operations
|
||||
class PoincareOperations {
|
||||
constructor(curvature = -1) {
|
||||
this.c = Math.abs(curvature);
|
||||
this.sqrtC = Math.sqrt(this.c);
|
||||
}
|
||||
|
||||
// Möbius addition: x ⊕ y
|
||||
mobiusAdd(x, y) {
|
||||
const c = this.c;
|
||||
const xNorm2 = x.reduce((s, v) => s + v * v, 0);
|
||||
const yNorm2 = y.reduce((s, v) => s + v * v, 0);
|
||||
const xy = x.reduce((s, v, i) => s + v * y[i], 0);
|
||||
|
||||
const denom = 1 + 2 * c * xy + c * c * xNorm2 * yNorm2;
|
||||
|
||||
return x.map((xi, i) => {
|
||||
const num = (1 + 2 * c * xy + c * yNorm2) * xi + (1 - c * xNorm2) * y[i];
|
||||
return num / denom;
|
||||
});
|
||||
}
|
||||
|
||||
// Poincaré distance with numerical stability
|
||||
distance(x, y) {
|
||||
const diff = x.map((v, i) => v - y[i]);
|
||||
const diffNorm2 = diff.reduce((s, v) => s + v * v, 0);
|
||||
const xNorm2 = x.reduce((s, v) => s + v * v, 0);
|
||||
const yNorm2 = y.reduce((s, v) => s + v * v, 0);
|
||||
|
||||
// Ensure points are inside the ball
|
||||
const eps = hyperbolicConfig.poincare.epsilon;
|
||||
const safeXNorm2 = Math.min(xNorm2, 1 - eps);
|
||||
const safeYNorm2 = Math.min(yNorm2, 1 - eps);
|
||||
|
||||
const num = 2 * diffNorm2;
|
||||
const denom = (1 - safeXNorm2) * (1 - safeYNorm2);
|
||||
|
||||
// Guard against numerical issues with Math.acosh (arg must be >= 1)
|
||||
const arg = 1 + num / Math.max(denom, eps);
|
||||
const safeArg = Math.max(1, arg); // acosh domain is [1, inf)
|
||||
|
||||
return Math.acosh(safeArg) / this.sqrtC;
|
||||
}
|
||||
|
||||
// Exponential map: tangent space → manifold
|
||||
expMap(x, v) {
|
||||
const c = this.c;
|
||||
const vNorm = Math.sqrt(v.reduce((s, vi) => s + vi * vi, 0)) + hyperbolicConfig.poincare.epsilon;
|
||||
const xNorm2 = x.reduce((s, xi) => s + xi * xi, 0);
|
||||
|
||||
const lambda = 2 / (1 - c * xNorm2);
|
||||
const t = Math.tanh(this.sqrtC * lambda * vNorm / 2);
|
||||
|
||||
const y = v.map(vi => t * vi / (this.sqrtC * vNorm));
|
||||
|
||||
return this.mobiusAdd(x, y);
|
||||
}
|
||||
|
||||
// Logarithmic map: manifold → tangent space
|
||||
logMap(x, y) {
|
||||
const c = this.c;
|
||||
const eps = hyperbolicConfig.poincare.epsilon;
|
||||
const xNorm2 = Math.min(x.reduce((s, xi) => s + xi * xi, 0), 1 - eps);
|
||||
const lambda = 2 / Math.max(1 - c * xNorm2, eps);
|
||||
|
||||
const mxy = this.mobiusAdd(x.map(v => -v), y);
|
||||
const mxyNorm = Math.sqrt(mxy.reduce((s, v) => s + v * v, 0)) + eps;
|
||||
|
||||
// Guard atanh domain: argument must be in (-1, 1)
|
||||
const atanhArg = Math.min(this.sqrtC * mxyNorm, 1 - eps);
|
||||
const t = Math.atanh(atanhArg);
|
||||
|
||||
return mxy.map(v => 2 * t * v / (lambda * this.sqrtC * mxyNorm));
|
||||
}
|
||||
|
||||
// Project to Poincaré ball (ensure ||x|| < 1)
|
||||
project(x) {
|
||||
const norm = Math.sqrt(x.reduce((s, v) => s + v * v, 0));
|
||||
const maxNorm = hyperbolicConfig.poincare.maxNorm;
|
||||
|
||||
if (norm >= maxNorm) {
|
||||
return x.map(v => v * maxNorm / norm);
|
||||
}
|
||||
return x;
|
||||
}
|
||||
|
||||
// Riemannian gradient (for optimization)
|
||||
riemannianGrad(x, euclideanGrad) {
|
||||
const xNorm2 = x.reduce((s, v) => s + v * v, 0);
|
||||
const scale = Math.pow((1 - this.c * xNorm2), 2) / 4;
|
||||
|
||||
return euclideanGrad.map(g => g * scale);
|
||||
}
|
||||
}
|
||||
|
||||
// Hyperbolic Embedding Model
|
||||
class HyperbolicEmbedding {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.poincare = new PoincareOperations(config.embedding.curvature);
|
||||
this.embeddings = new Map();
|
||||
this.hierarchyGraph = new Map();
|
||||
this.losses = [];
|
||||
}
|
||||
|
||||
// Initialize embedding for entity
|
||||
initEmbedding(entity) {
|
||||
const dim = this.config.embedding.dimension;
|
||||
|
||||
// Initialize near origin (parent entities will move toward center)
|
||||
const embedding = [];
|
||||
for (let i = 0; i < dim; i++) {
|
||||
embedding.push((Math.random() - 0.5) * 0.1);
|
||||
}
|
||||
|
||||
this.embeddings.set(entity, this.poincare.project(embedding));
|
||||
}
|
||||
|
||||
// Add hierarchy relationship
|
||||
addHierarchy(parent, child) {
|
||||
if (!this.hierarchyGraph.has(parent)) {
|
||||
this.hierarchyGraph.set(parent, { children: [], parent: null });
|
||||
}
|
||||
if (!this.hierarchyGraph.has(child)) {
|
||||
this.hierarchyGraph.set(child, { children: [], parent: null });
|
||||
}
|
||||
|
||||
this.hierarchyGraph.get(parent).children.push(child);
|
||||
this.hierarchyGraph.get(child).parent = parent;
|
||||
|
||||
// Initialize embeddings
|
||||
if (!this.embeddings.has(parent)) this.initEmbedding(parent);
|
||||
if (!this.embeddings.has(child)) this.initEmbedding(child);
|
||||
}
|
||||
|
||||
// Training loss: children should be farther from origin than parents
|
||||
computeLoss(parent, child) {
|
||||
const pEmb = this.embeddings.get(parent);
|
||||
const cEmb = this.embeddings.get(child);
|
||||
|
||||
if (!pEmb || !cEmb) return 0;
|
||||
|
||||
// Distance from origin
|
||||
const pDist = Math.sqrt(pEmb.reduce((s, v) => s + v * v, 0));
|
||||
const cDist = Math.sqrt(cEmb.reduce((s, v) => s + v * v, 0));
|
||||
|
||||
// Parent should be closer to origin
|
||||
const hierarchyLoss = Math.max(0, pDist - cDist + 0.1);
|
||||
|
||||
// Parent-child should be close
|
||||
const distLoss = this.poincare.distance(pEmb, cEmb);
|
||||
|
||||
return hierarchyLoss + 0.5 * distLoss;
|
||||
}
|
||||
|
||||
// Train embeddings
|
||||
train() {
|
||||
const lr = this.config.embedding.learningRate;
|
||||
|
||||
for (let epoch = 0; epoch < this.config.embedding.epochs; epoch++) {
|
||||
let totalLoss = 0;
|
||||
|
||||
// For each parent-child pair
|
||||
for (const [entity, info] of this.hierarchyGraph) {
|
||||
for (const child of info.children) {
|
||||
const loss = this.computeLoss(entity, child);
|
||||
totalLoss += loss;
|
||||
|
||||
// Gradient update (simplified)
|
||||
this.updateEmbedding(entity, child, lr);
|
||||
}
|
||||
}
|
||||
|
||||
this.losses.push(totalLoss);
|
||||
|
||||
// Decay learning rate
|
||||
if (epoch % 20 === 0) {
|
||||
// lr *= 0.9;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Riemannian gradient descent update
|
||||
updateEmbedding(parent, child, lr) {
|
||||
const pEmb = this.embeddings.get(parent);
|
||||
const cEmb = this.embeddings.get(child);
|
||||
const eps = hyperbolicConfig.poincare.epsilon;
|
||||
|
||||
// Compute Euclidean gradients
|
||||
const pNorm2 = pEmb.reduce((s, v) => s + v * v, 0);
|
||||
const cNorm2 = cEmb.reduce((s, v) => s + v * v, 0);
|
||||
|
||||
// Gradient for parent: move toward origin (hierarchy constraint)
|
||||
const pGradEuclid = pEmb.map(v => v); // gradient of ||x||^2 is 2x
|
||||
|
||||
// Gradient for child: move toward parent but stay farther from origin
|
||||
const direction = cEmb.map((v, i) => pEmb[i] - v);
|
||||
const dirNorm = Math.sqrt(direction.reduce((s, d) => s + d * d, 0)) + eps;
|
||||
const normalizedDir = direction.map(d => d / dirNorm);
|
||||
|
||||
// Child gradient: toward parent + outward from origin
|
||||
const cGradEuclid = cEmb.map((v, i) => -normalizedDir[i] * 0.3 - v * 0.1);
|
||||
|
||||
// Convert to Riemannian gradients using metric tensor
|
||||
const pRiemannGrad = this.poincare.riemannianGrad(pEmb, pGradEuclid);
|
||||
const cRiemannGrad = this.poincare.riemannianGrad(cEmb, cGradEuclid);
|
||||
|
||||
// Update using exponential map (proper Riemannian SGD)
|
||||
const pTangent = pRiemannGrad.map(g => -lr * g);
|
||||
const cTangent = cRiemannGrad.map(g => -lr * g);
|
||||
|
||||
const newPEmb = this.poincare.expMap(pEmb, pTangent);
|
||||
const newCEmb = this.poincare.expMap(cEmb, cTangent);
|
||||
|
||||
this.embeddings.set(parent, this.poincare.project(newPEmb));
|
||||
this.embeddings.set(child, this.poincare.project(newCEmb));
|
||||
}
|
||||
|
||||
// Get embedding
|
||||
getEmbedding(entity) {
|
||||
return this.embeddings.get(entity);
|
||||
}
|
||||
|
||||
// Find nearest neighbors in hyperbolic space
|
||||
findNearest(entity, k = 5) {
|
||||
const emb = this.embeddings.get(entity);
|
||||
if (!emb) return [];
|
||||
|
||||
const distances = [];
|
||||
for (const [other, otherEmb] of this.embeddings) {
|
||||
if (other !== entity) {
|
||||
distances.push({
|
||||
entity: other,
|
||||
distance: this.poincare.distance(emb, otherEmb)
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return distances.sort((a, b) => a.distance - b.distance).slice(0, k);
|
||||
}
|
||||
|
||||
// Get depth (distance from origin)
|
||||
getDepth(entity) {
|
||||
const emb = this.embeddings.get(entity);
|
||||
if (!emb) return 0;
|
||||
return Math.sqrt(emb.reduce((s, v) => s + v * v, 0));
|
||||
}
|
||||
}
|
||||
|
||||
// Market hierarchy builder
|
||||
class MarketHierarchy {
|
||||
constructor() {
|
||||
this.sectors = {
|
||||
'Technology': ['Software', 'Hardware', 'Semiconductors'],
|
||||
'Healthcare': ['Pharma', 'Biotech', 'MedDevices'],
|
||||
'Finance': ['Banks', 'Insurance', 'AssetMgmt'],
|
||||
'Energy': ['Oil', 'Gas', 'Renewables'],
|
||||
'Consumer': ['Retail', 'FoodBev', 'Apparel']
|
||||
};
|
||||
|
||||
this.industries = {
|
||||
'Software': ['MSFT', 'ORCL', 'CRM'],
|
||||
'Hardware': ['AAPL', 'DELL', 'HPQ'],
|
||||
'Semiconductors': ['NVDA', 'AMD', 'INTC'],
|
||||
'Pharma': ['JNJ', 'PFE', 'MRK'],
|
||||
'Biotech': ['AMGN', 'GILD', 'BIIB'],
|
||||
'MedDevices': ['MDT', 'ABT', 'SYK'],
|
||||
'Banks': ['JPM', 'BAC', 'WFC'],
|
||||
'Insurance': ['BRK', 'MET', 'AIG'],
|
||||
'AssetMgmt': ['BLK', 'GS', 'MS'],
|
||||
'Oil': ['XOM', 'CVX', 'COP'],
|
||||
'Gas': ['SLB', 'HAL', 'BKR'],
|
||||
'Renewables': ['NEE', 'ENPH', 'SEDG'],
|
||||
'Retail': ['AMZN', 'WMT', 'TGT'],
|
||||
'FoodBev': ['KO', 'PEP', 'MCD'],
|
||||
'Apparel': ['NKE', 'LULU', 'TJX']
|
||||
};
|
||||
}
|
||||
|
||||
buildHierarchy(embedding) {
|
||||
// Market → Sectors
|
||||
for (const sector of Object.keys(this.sectors)) {
|
||||
embedding.addHierarchy('Market', sector);
|
||||
|
||||
// Sector → Industries
|
||||
for (const industry of this.sectors[sector]) {
|
||||
embedding.addHierarchy(sector, industry);
|
||||
|
||||
// Industry → Stocks
|
||||
if (this.industries[industry]) {
|
||||
for (const stock of this.industries[industry]) {
|
||||
embedding.addHierarchy(industry, stock);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
getAllStocks() {
|
||||
const stocks = [];
|
||||
for (const industry of Object.values(this.industries)) {
|
||||
stocks.push(...industry);
|
||||
}
|
||||
return stocks;
|
||||
}
|
||||
}
|
||||
|
||||
// Visualization helper
|
||||
class HyperbolicVisualizer {
|
||||
visualize(embedding, width = 40, height = 20) {
|
||||
const grid = [];
|
||||
for (let i = 0; i < height; i++) {
|
||||
grid.push(new Array(width).fill(' '));
|
||||
}
|
||||
|
||||
// Draw unit circle boundary
|
||||
for (let angle = 0; angle < 2 * Math.PI; angle += 0.1) {
|
||||
const x = Math.cos(angle) * 0.95;
|
||||
const y = Math.sin(angle) * 0.95;
|
||||
|
||||
const gridX = Math.floor((x + 1) / 2 * (width - 1));
|
||||
const gridY = Math.floor((1 - y) / 2 * (height - 1));
|
||||
|
||||
if (gridY >= 0 && gridY < height && gridX >= 0 && gridX < width) {
|
||||
grid[gridY][gridX] = '·';
|
||||
}
|
||||
}
|
||||
|
||||
// Plot embeddings
|
||||
const symbols = {
|
||||
market: '◉',
|
||||
sector: '●',
|
||||
industry: '○',
|
||||
stock: '·'
|
||||
};
|
||||
|
||||
for (const [entity, emb] of embedding.embeddings) {
|
||||
const x = emb[0];
|
||||
const y = emb[1];
|
||||
|
||||
const gridX = Math.floor((x + 1) / 2 * (width - 1));
|
||||
const gridY = Math.floor((1 - y) / 2 * (height - 1));
|
||||
|
||||
if (gridY >= 0 && gridY < height && gridX >= 0 && gridX < width) {
|
||||
let symbol = '?';
|
||||
if (entity === 'Market') symbol = symbols.market;
|
||||
else if (['Technology', 'Healthcare', 'Finance', 'Energy', 'Consumer'].includes(entity)) symbol = symbols.sector;
|
||||
else if (entity.length > 4) symbol = symbols.industry;
|
||||
else symbol = symbols.stock;
|
||||
|
||||
grid[gridY][gridX] = symbol;
|
||||
}
|
||||
}
|
||||
|
||||
return grid.map(row => row.join('')).join('\n');
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('HYPERBOLIC MARKET EMBEDDINGS');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
|
||||
// 1. Build market hierarchy
|
||||
console.log('1. Market Hierarchy Construction:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const hierarchy = new MarketHierarchy();
|
||||
const embedding = new HyperbolicEmbedding(hyperbolicConfig);
|
||||
|
||||
hierarchy.buildHierarchy(embedding);
|
||||
|
||||
console.log(` Levels: ${hyperbolicConfig.hierarchy.levels.join(' → ')}`);
|
||||
console.log(` Sectors: ${Object.keys(hierarchy.sectors).length}`);
|
||||
console.log(` Industries: ${Object.keys(hierarchy.industries).length}`);
|
||||
console.log(` Stocks: ${hierarchy.getAllStocks().length}`);
|
||||
console.log(` Dimension: ${hyperbolicConfig.embedding.dimension}D Poincaré ball`);
|
||||
console.log();
|
||||
|
||||
// 2. Train embeddings
|
||||
console.log('2. Training Hyperbolic Embeddings:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
embedding.train();
|
||||
|
||||
console.log(` Epochs: ${hyperbolicConfig.embedding.epochs}`);
|
||||
console.log(` Learning rate: ${hyperbolicConfig.embedding.learningRate}`);
|
||||
console.log(` Initial loss: ${embedding.losses[0]?.toFixed(4) || 'N/A'}`);
|
||||
console.log(` Final loss: ${embedding.losses[embedding.losses.length - 1]?.toFixed(4) || 'N/A'}`);
|
||||
console.log();
|
||||
|
||||
// 3. Embedding depths
|
||||
console.log('3. Hierarchy Depth Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(' Entity depths (distance from origin):');
|
||||
console.log();
|
||||
|
||||
// Market (root)
|
||||
const marketDepth = embedding.getDepth('Market');
|
||||
console.log(` Market (root): ${marketDepth.toFixed(4)}`);
|
||||
|
||||
// Sectors
|
||||
let avgSectorDepth = 0;
|
||||
for (const sector of Object.keys(hierarchy.sectors)) {
|
||||
avgSectorDepth += embedding.getDepth(sector);
|
||||
}
|
||||
avgSectorDepth /= Object.keys(hierarchy.sectors).length;
|
||||
console.log(` Sectors (avg): ${avgSectorDepth.toFixed(4)}`);
|
||||
|
||||
// Industries
|
||||
let avgIndustryDepth = 0;
|
||||
let industryCount = 0;
|
||||
for (const industry of Object.keys(hierarchy.industries)) {
|
||||
avgIndustryDepth += embedding.getDepth(industry);
|
||||
industryCount++;
|
||||
}
|
||||
avgIndustryDepth /= industryCount;
|
||||
console.log(` Industries (avg): ${avgIndustryDepth.toFixed(4)}`);
|
||||
|
||||
// Stocks
|
||||
let avgStockDepth = 0;
|
||||
const stocks = hierarchy.getAllStocks();
|
||||
for (const stock of stocks) {
|
||||
avgStockDepth += embedding.getDepth(stock);
|
||||
}
|
||||
avgStockDepth /= stocks.length;
|
||||
console.log(` Stocks (avg): ${avgStockDepth.toFixed(4)}`);
|
||||
console.log();
|
||||
|
||||
console.log(' Depth increases with hierarchy level ✓');
|
||||
console.log(' (Root near origin, leaves near boundary)');
|
||||
console.log();
|
||||
|
||||
// 4. Sample embeddings
|
||||
console.log('4. Sample Embeddings (2D Poincaré Coordinates):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const samples = ['Market', 'Technology', 'Software', 'MSFT', 'Finance', 'Banks', 'JPM'];
|
||||
|
||||
console.log(' Entity │ x │ y │ Depth');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const entity of samples) {
|
||||
const emb = embedding.getEmbedding(entity);
|
||||
if (emb) {
|
||||
const depth = embedding.getDepth(entity);
|
||||
console.log(` ${entity.padEnd(16)} │ ${emb[0].toFixed(5).padStart(8)} │ ${emb[1].toFixed(5).padStart(8)} │ ${depth.toFixed(4)}`);
|
||||
}
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 5. Nearest neighbors
|
||||
console.log('5. Nearest Neighbors (Hyperbolic Distance):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const queryStocks = ['AAPL', 'JPM', 'XOM'];
|
||||
|
||||
for (const stock of queryStocks) {
|
||||
const neighbors = embedding.findNearest(stock, 5);
|
||||
console.log(` ${stock} neighbors:`);
|
||||
for (const { entity, distance } of neighbors) {
|
||||
console.log(` ${entity.padEnd(12)} d=${distance.toFixed(4)}`);
|
||||
}
|
||||
console.log();
|
||||
}
|
||||
|
||||
// 6. Hyperbolic distance properties
|
||||
console.log('6. Hyperbolic Distance Properties:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const poincare = embedding.poincare;
|
||||
|
||||
// Same industry
|
||||
const samIndustry = poincare.distance(
|
||||
embedding.getEmbedding('MSFT'),
|
||||
embedding.getEmbedding('ORCL')
|
||||
);
|
||||
|
||||
// Same sector, different industry
|
||||
const sameSector = poincare.distance(
|
||||
embedding.getEmbedding('MSFT'),
|
||||
embedding.getEmbedding('NVDA')
|
||||
);
|
||||
|
||||
// Different sector
|
||||
const diffSector = poincare.distance(
|
||||
embedding.getEmbedding('MSFT'),
|
||||
embedding.getEmbedding('JPM')
|
||||
);
|
||||
|
||||
console.log(' Distance comparisons:');
|
||||
console.log(` MSFT ↔ ORCL (same industry): ${samIndustry.toFixed(4)}`);
|
||||
console.log(` MSFT ↔ NVDA (same sector): ${sameSector.toFixed(4)}`);
|
||||
console.log(` MSFT ↔ JPM (diff sector): ${diffSector.toFixed(4)}`);
|
||||
console.log();
|
||||
console.log(' Distances increase with hierarchical distance ✓');
|
||||
console.log();
|
||||
|
||||
// 7. Visualization
|
||||
console.log('7. Poincaré Disk Visualization:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const visualizer = new HyperbolicVisualizer();
|
||||
const viz = visualizer.visualize(embedding);
|
||||
|
||||
console.log(viz);
|
||||
console.log();
|
||||
console.log(' Legend: ◉=Market ●=Sector ○=Industry ·=Stock');
|
||||
console.log();
|
||||
|
||||
// 8. Sector clusters
|
||||
console.log('8. Sector Clustering Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const [sector, industries] of Object.entries(hierarchy.sectors).slice(0, 3)) {
|
||||
const sectorEmb = embedding.getEmbedding(sector);
|
||||
|
||||
// Calculate average distance from sector to its stocks
|
||||
let avgDist = 0;
|
||||
let count = 0;
|
||||
|
||||
for (const industry of industries) {
|
||||
const stocks = hierarchy.industries[industry] || [];
|
||||
for (const stock of stocks) {
|
||||
const stockEmb = embedding.getEmbedding(stock);
|
||||
if (stockEmb) {
|
||||
avgDist += poincare.distance(sectorEmb, stockEmb);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
avgDist /= count || 1;
|
||||
|
||||
console.log(` ${sector}:`);
|
||||
console.log(` Avg distance to stocks: ${avgDist.toFixed(4)}`);
|
||||
console.log(` Stocks: ${industries.flatMap(i => hierarchy.industries[i] || []).slice(0, 5).join(', ')}...`);
|
||||
console.log();
|
||||
}
|
||||
|
||||
// 9. Trading implications
|
||||
console.log('9. Trading Implications:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(' Hyperbolic embeddings enable:');
|
||||
console.log();
|
||||
console.log(' 1. Hierarchical diversification:');
|
||||
console.log(' - Select stocks from different "branches"');
|
||||
console.log(' - Maximize hyperbolic distance for diversification');
|
||||
console.log();
|
||||
console.log(' 2. Sector rotation strategies:');
|
||||
console.log(' - Identify sector centroids');
|
||||
console.log(' - Track rotation by watching centroid distances');
|
||||
console.log();
|
||||
console.log(' 3. Pair trading:');
|
||||
console.log(' - Find pairs with small hyperbolic distance');
|
||||
console.log(' - These stocks should move together');
|
||||
console.log();
|
||||
|
||||
// 10. RuVector integration
|
||||
console.log('10. RuVector Vector Storage:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Hyperbolic embeddings stored as vectors:');
|
||||
console.log();
|
||||
|
||||
const appleEmb = embedding.getEmbedding('AAPL');
|
||||
console.log(` AAPL embedding: [${appleEmb.map(v => v.toFixed(4)).join(', ')}]`);
|
||||
console.log();
|
||||
console.log(' Note: Euclidean HNSW can be used after mapping');
|
||||
console.log(' to tangent space at origin for approximate NN.');
|
||||
console.log();
|
||||
console.log(' Use cases:');
|
||||
console.log(' - Find hierarchically similar stocks');
|
||||
console.log(' - Sector membership inference');
|
||||
console.log(' - Anomaly detection (stocks far from expected position)');
|
||||
console.log();
|
||||
|
||||
console.log('═'.repeat(70));
|
||||
console.log('Hyperbolic market embeddings completed');
|
||||
console.log('═'.repeat(70));
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
811
examples/neural-trader/exotic/multi-agent-swarm.js
Normal file
811
examples/neural-trader/exotic/multi-agent-swarm.js
Normal file
@@ -0,0 +1,811 @@
|
||||
/**
|
||||
* Multi-Agent Swarm Trading Coordination
|
||||
*
|
||||
* EXOTIC: Distributed intelligence for market analysis
|
||||
*
|
||||
* Uses @neural-trader with RuVector for:
|
||||
* - Specialized agent roles (momentum, mean-reversion, sentiment, arbitrage)
|
||||
* - Consensus mechanisms for trade decisions
|
||||
* - Pheromone-inspired signal propagation
|
||||
* - Emergent collective intelligence
|
||||
*
|
||||
* Each agent maintains its own vector memory in RuVector,
|
||||
* with cross-agent communication via shared memory space.
|
||||
*/
|
||||
|
||||
// Ring buffer for efficient bounded memory
|
||||
class RingBuffer {
|
||||
constructor(capacity) {
|
||||
this.capacity = capacity;
|
||||
this.buffer = new Array(capacity);
|
||||
this.head = 0;
|
||||
this.size = 0;
|
||||
}
|
||||
|
||||
push(item) {
|
||||
this.buffer[this.head] = item;
|
||||
this.head = (this.head + 1) % this.capacity;
|
||||
if (this.size < this.capacity) this.size++;
|
||||
}
|
||||
|
||||
getAll() {
|
||||
if (this.size < this.capacity) {
|
||||
return this.buffer.slice(0, this.size);
|
||||
}
|
||||
return [...this.buffer.slice(this.head), ...this.buffer.slice(0, this.head)];
|
||||
}
|
||||
|
||||
getLast(n) {
|
||||
const all = this.getAll();
|
||||
return all.slice(-Math.min(n, all.length));
|
||||
}
|
||||
|
||||
get length() {
|
||||
return this.size;
|
||||
}
|
||||
}
|
||||
|
||||
// Signal pool for object reuse
|
||||
class SignalPool {
|
||||
constructor(initialSize = 100) {
|
||||
this.pool = [];
|
||||
for (let i = 0; i < initialSize; i++) {
|
||||
this.pool.push({ direction: 0, confidence: 0, timestamp: 0, reason: '' });
|
||||
}
|
||||
}
|
||||
|
||||
acquire(direction, confidence, reason) {
|
||||
let signal = this.pool.pop();
|
||||
if (!signal) {
|
||||
signal = { direction: 0, confidence: 0, timestamp: 0, reason: '' };
|
||||
}
|
||||
signal.direction = direction;
|
||||
signal.confidence = confidence;
|
||||
signal.timestamp = Date.now();
|
||||
signal.reason = reason;
|
||||
return signal;
|
||||
}
|
||||
|
||||
release(signal) {
|
||||
if (this.pool.length < 500) {
|
||||
this.pool.push(signal);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const signalPool = new SignalPool(200);
|
||||
|
||||
// Swarm configuration
|
||||
const swarmConfig = {
|
||||
// Agent types with specializations
|
||||
agents: {
|
||||
momentum: { count: 3, weight: 0.25, lookback: 20 },
|
||||
meanReversion: { count: 2, weight: 0.20, zscore: 2.0 },
|
||||
sentiment: { count: 2, weight: 0.15, threshold: 0.6 },
|
||||
arbitrage: { count: 1, weight: 0.15, minSpread: 0.001 },
|
||||
volatility: { count: 2, weight: 0.25, regime: 'adaptive' }
|
||||
},
|
||||
|
||||
// Consensus parameters
|
||||
consensus: {
|
||||
method: 'weighted_vote', // weighted_vote, byzantine, raft
|
||||
quorum: 0.6, // 60% agreement needed
|
||||
timeout: 1000, // ms to wait for votes
|
||||
minConfidence: 0.7 // Minimum confidence to act
|
||||
},
|
||||
|
||||
// Pheromone decay for signal propagation
|
||||
pheromone: {
|
||||
decayRate: 0.95,
|
||||
reinforcement: 1.5,
|
||||
evaporationTime: 300000 // 5 minutes
|
||||
}
|
||||
};
|
||||
|
||||
// Base Agent class
|
||||
class TradingAgent {
|
||||
constructor(id, type, config) {
|
||||
this.id = id;
|
||||
this.type = type;
|
||||
this.config = config;
|
||||
this.memory = [];
|
||||
this.signals = [];
|
||||
this.confidence = 0.5;
|
||||
this.performance = { wins: 0, losses: 0, pnl: 0 };
|
||||
this.maxSignals = 1000; // Bound signals array to prevent memory leak
|
||||
}
|
||||
|
||||
// Analyze market data and generate signal
|
||||
analyze(marketData) {
|
||||
throw new Error('Subclass must implement analyze()');
|
||||
}
|
||||
|
||||
// Update agent's memory with new observation
|
||||
updateMemory(observation) {
|
||||
this.memory.push({
|
||||
timestamp: Date.now(),
|
||||
observation,
|
||||
signal: this.signals[this.signals.length - 1]
|
||||
});
|
||||
|
||||
// Keep bounded memory
|
||||
if (this.memory.length > 1000) {
|
||||
this.memory.shift();
|
||||
}
|
||||
}
|
||||
|
||||
// Learn from outcome
|
||||
learn(outcome) {
|
||||
if (outcome.profitable) {
|
||||
this.performance.wins++;
|
||||
this.confidence = Math.min(0.95, this.confidence * 1.05);
|
||||
} else {
|
||||
this.performance.losses++;
|
||||
this.confidence = Math.max(0.1, this.confidence * 0.95);
|
||||
}
|
||||
this.performance.pnl += outcome.pnl;
|
||||
}
|
||||
}
|
||||
|
||||
// Momentum Agent - follows trends
|
||||
class MomentumAgent extends TradingAgent {
|
||||
constructor(id, config) {
|
||||
super(id, 'momentum', config);
|
||||
this.lookback = config.lookback || 20;
|
||||
}
|
||||
|
||||
analyze(marketData) {
|
||||
const prices = marketData.slice(-this.lookback);
|
||||
if (prices.length < this.lookback) {
|
||||
return { signal: 0, confidence: 0, reason: 'insufficient data' };
|
||||
}
|
||||
|
||||
// Calculate momentum as rate of change
|
||||
const oldPrice = prices[0].close;
|
||||
const newPrice = prices[prices.length - 1].close;
|
||||
const momentum = (newPrice - oldPrice) / oldPrice;
|
||||
|
||||
// Calculate trend strength via linear regression
|
||||
const n = prices.length;
|
||||
let sumX = 0, sumY = 0, sumXY = 0, sumX2 = 0;
|
||||
for (let i = 0; i < n; i++) {
|
||||
sumX += i;
|
||||
sumY += prices[i].close;
|
||||
sumXY += i * prices[i].close;
|
||||
sumX2 += i * i;
|
||||
}
|
||||
const denominator = n * sumX2 - sumX * sumX;
|
||||
// Guard against division by zero (all prices identical)
|
||||
const slope = Math.abs(denominator) > 1e-10
|
||||
? (n * sumXY - sumX * sumY) / denominator
|
||||
: 0;
|
||||
const avgPrice = sumY / n;
|
||||
const normalizedSlope = avgPrice > 0 ? slope / avgPrice : 0;
|
||||
|
||||
// Signal strength based on momentum and trend alignment
|
||||
let signal = 0;
|
||||
let confidence = 0;
|
||||
|
||||
if (momentum > 0 && normalizedSlope > 0) {
|
||||
signal = 1; // Long
|
||||
confidence = Math.min(0.95, Math.abs(momentum) * 10 + Math.abs(normalizedSlope) * 100);
|
||||
} else if (momentum < 0 && normalizedSlope < 0) {
|
||||
signal = -1; // Short
|
||||
confidence = Math.min(0.95, Math.abs(momentum) * 10 + Math.abs(normalizedSlope) * 100);
|
||||
}
|
||||
|
||||
const result = {
|
||||
signal,
|
||||
confidence: confidence * this.confidence, // Weighted by agent's track record
|
||||
reason: `momentum=${(momentum * 100).toFixed(2)}%, slope=${(normalizedSlope * 10000).toFixed(2)}bps/bar`,
|
||||
agentId: this.id,
|
||||
agentType: this.type
|
||||
};
|
||||
|
||||
this.signals.push(result);
|
||||
// Bound signals array to prevent memory leak
|
||||
if (this.signals.length > this.maxSignals) {
|
||||
this.signals = this.signals.slice(-this.maxSignals);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
// Mean Reversion Agent - fades extremes
|
||||
class MeanReversionAgent extends TradingAgent {
|
||||
constructor(id, config) {
|
||||
super(id, 'meanReversion', config);
|
||||
this.zscoreThreshold = config.zscore || 2.0;
|
||||
this.lookback = config.lookback || 50;
|
||||
}
|
||||
|
||||
analyze(marketData) {
|
||||
const prices = marketData.slice(-this.lookback).map(d => d.close);
|
||||
if (prices.length < 20) {
|
||||
return { signal: 0, confidence: 0, reason: 'insufficient data' };
|
||||
}
|
||||
|
||||
// Calculate z-score with division-by-zero guard
|
||||
const mean = prices.reduce((a, b) => a + b, 0) / prices.length;
|
||||
const variance = prices.reduce((sum, p) => sum + Math.pow(p - mean, 2), 0) / prices.length;
|
||||
const std = Math.sqrt(variance);
|
||||
const currentPrice = prices[prices.length - 1];
|
||||
// Guard against zero standard deviation (constant prices)
|
||||
const zscore = std > 1e-10 ? (currentPrice - mean) / std : 0;
|
||||
|
||||
let signal = 0;
|
||||
let confidence = 0;
|
||||
|
||||
if (zscore > this.zscoreThreshold) {
|
||||
signal = -1; // Short - price too high
|
||||
confidence = Math.min(0.9, (Math.abs(zscore) - this.zscoreThreshold) * 0.3);
|
||||
} else if (zscore < -this.zscoreThreshold) {
|
||||
signal = 1; // Long - price too low
|
||||
confidence = Math.min(0.9, (Math.abs(zscore) - this.zscoreThreshold) * 0.3);
|
||||
}
|
||||
|
||||
const result = {
|
||||
signal,
|
||||
confidence: confidence * this.confidence,
|
||||
reason: `zscore=${zscore.toFixed(2)}, mean=${mean.toFixed(2)}, std=${std.toFixed(4)}`,
|
||||
agentId: this.id,
|
||||
agentType: this.type
|
||||
};
|
||||
|
||||
this.signals.push(result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
// Sentiment Agent - analyzes market sentiment
|
||||
class SentimentAgent extends TradingAgent {
|
||||
constructor(id, config) {
|
||||
super(id, 'sentiment', config);
|
||||
this.threshold = config.threshold || 0.6;
|
||||
}
|
||||
|
||||
analyze(marketData) {
|
||||
// Derive sentiment from price action (in production, use news/social data)
|
||||
const recent = marketData.slice(-10);
|
||||
if (recent.length < 5) {
|
||||
return { signal: 0, confidence: 0, reason: 'insufficient data' };
|
||||
}
|
||||
|
||||
// Count bullish vs bearish candles
|
||||
let bullish = 0, bearish = 0;
|
||||
let volumeUp = 0, volumeDown = 0;
|
||||
|
||||
for (const candle of recent) {
|
||||
if (candle.close > candle.open) {
|
||||
bullish++;
|
||||
volumeUp += candle.volume || 1;
|
||||
} else {
|
||||
bearish++;
|
||||
volumeDown += candle.volume || 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Volume-weighted sentiment
|
||||
const totalVolume = volumeUp + volumeDown;
|
||||
const sentiment = totalVolume > 0
|
||||
? (volumeUp - volumeDown) / totalVolume
|
||||
: (bullish - bearish) / recent.length;
|
||||
|
||||
let signal = 0;
|
||||
let confidence = 0;
|
||||
|
||||
if (sentiment > this.threshold - 0.5) {
|
||||
signal = 1;
|
||||
confidence = Math.abs(sentiment);
|
||||
} else if (sentiment < -(this.threshold - 0.5)) {
|
||||
signal = -1;
|
||||
confidence = Math.abs(sentiment);
|
||||
}
|
||||
|
||||
const result = {
|
||||
signal,
|
||||
confidence: confidence * this.confidence,
|
||||
reason: `sentiment=${sentiment.toFixed(2)}, bullish=${bullish}/${recent.length}`,
|
||||
agentId: this.id,
|
||||
agentType: this.type
|
||||
};
|
||||
|
||||
this.signals.push(result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
// Volatility Regime Agent - adapts to market conditions
|
||||
class VolatilityAgent extends TradingAgent {
|
||||
constructor(id, config) {
|
||||
super(id, 'volatility', config);
|
||||
this.lookback = 20;
|
||||
}
|
||||
|
||||
analyze(marketData) {
|
||||
const prices = marketData.slice(-this.lookback);
|
||||
if (prices.length < 10) {
|
||||
return { signal: 0, confidence: 0, reason: 'insufficient data' };
|
||||
}
|
||||
|
||||
// Calculate returns
|
||||
const returns = [];
|
||||
for (let i = 1; i < prices.length; i++) {
|
||||
returns.push((prices[i].close - prices[i-1].close) / prices[i-1].close);
|
||||
}
|
||||
|
||||
// Calculate realized volatility
|
||||
const mean = returns.reduce((a, b) => a + b, 0) / returns.length;
|
||||
const variance = returns.reduce((sum, r) => sum + Math.pow(r - mean, 2), 0) / returns.length;
|
||||
const volatility = Math.sqrt(variance) * Math.sqrt(252); // Annualized
|
||||
|
||||
// Detect regime
|
||||
const highVolThreshold = 0.30; // 30% annualized
|
||||
const lowVolThreshold = 0.15; // 15% annualized
|
||||
|
||||
let regime = 'normal';
|
||||
let signal = 0;
|
||||
let confidence = 0;
|
||||
|
||||
if (volatility > highVolThreshold) {
|
||||
regime = 'high';
|
||||
// In high vol, mean reversion tends to work
|
||||
const lastReturn = returns[returns.length - 1];
|
||||
if (Math.abs(lastReturn) > variance * 2) {
|
||||
signal = lastReturn > 0 ? -1 : 1; // Fade the move
|
||||
confidence = 0.6;
|
||||
}
|
||||
} else if (volatility < lowVolThreshold) {
|
||||
regime = 'low';
|
||||
// In low vol, momentum tends to work
|
||||
const recentMomentum = prices[prices.length - 1].close / prices[0].close - 1;
|
||||
signal = recentMomentum > 0 ? 1 : -1;
|
||||
confidence = 0.5;
|
||||
}
|
||||
|
||||
const result = {
|
||||
signal,
|
||||
confidence: confidence * this.confidence,
|
||||
reason: `regime=${regime}, vol=${(volatility * 100).toFixed(1)}%`,
|
||||
regime,
|
||||
volatility,
|
||||
agentId: this.id,
|
||||
agentType: this.type
|
||||
};
|
||||
|
||||
this.signals.push(result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
// Swarm Coordinator - manages consensus
|
||||
class SwarmCoordinator {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.agents = [];
|
||||
this.pheromoneTrails = new Map();
|
||||
this.consensusHistory = [];
|
||||
}
|
||||
|
||||
// Initialize agent swarm
|
||||
initializeSwarm() {
|
||||
let agentId = 0;
|
||||
|
||||
// Create momentum agents
|
||||
for (let i = 0; i < this.config.agents.momentum.count; i++) {
|
||||
this.agents.push(new MomentumAgent(agentId++, {
|
||||
...this.config.agents.momentum,
|
||||
lookback: 10 + i * 10 // Different lookbacks
|
||||
}));
|
||||
}
|
||||
|
||||
// Create mean reversion agents
|
||||
for (let i = 0; i < this.config.agents.meanReversion.count; i++) {
|
||||
this.agents.push(new MeanReversionAgent(agentId++, {
|
||||
...this.config.agents.meanReversion,
|
||||
zscore: 1.5 + i * 0.5
|
||||
}));
|
||||
}
|
||||
|
||||
// Create sentiment agents
|
||||
for (let i = 0; i < this.config.agents.sentiment.count; i++) {
|
||||
this.agents.push(new SentimentAgent(agentId++, this.config.agents.sentiment));
|
||||
}
|
||||
|
||||
// Create volatility agents
|
||||
for (let i = 0; i < this.config.agents.volatility.count; i++) {
|
||||
this.agents.push(new VolatilityAgent(agentId++, this.config.agents.volatility));
|
||||
}
|
||||
|
||||
console.log(`Initialized swarm with ${this.agents.length} agents`);
|
||||
}
|
||||
|
||||
// Gather signals from all agents
|
||||
gatherSignals(marketData) {
|
||||
const signals = [];
|
||||
|
||||
for (const agent of this.agents) {
|
||||
const signal = agent.analyze(marketData);
|
||||
signals.push(signal);
|
||||
}
|
||||
|
||||
return signals;
|
||||
}
|
||||
|
||||
// Weighted voting consensus
|
||||
weightedVoteConsensus(signals) {
|
||||
let totalWeight = 0;
|
||||
let weightedSum = 0;
|
||||
let totalConfidence = 0;
|
||||
|
||||
const agentWeights = this.config.agents;
|
||||
|
||||
for (const signal of signals) {
|
||||
if (signal.signal === 0) continue;
|
||||
|
||||
const typeWeight = agentWeights[signal.agentType]?.weight || 0.1;
|
||||
const weight = typeWeight * signal.confidence;
|
||||
|
||||
weightedSum += signal.signal * weight;
|
||||
totalWeight += weight;
|
||||
totalConfidence += signal.confidence;
|
||||
}
|
||||
|
||||
if (totalWeight === 0) {
|
||||
return { decision: 0, confidence: 0, reason: 'no signals' };
|
||||
}
|
||||
|
||||
const normalizedSignal = weightedSum / totalWeight;
|
||||
const avgConfidence = totalConfidence / signals.length;
|
||||
|
||||
// Apply quorum requirement
|
||||
const activeSignals = signals.filter(s => s.signal !== 0);
|
||||
const quorum = activeSignals.length / signals.length;
|
||||
|
||||
if (quorum < this.config.consensus.quorum) {
|
||||
return {
|
||||
decision: 0,
|
||||
confidence: 0,
|
||||
reason: `quorum not met (${(quorum * 100).toFixed(0)}% < ${(this.config.consensus.quorum * 100).toFixed(0)}%)`
|
||||
};
|
||||
}
|
||||
|
||||
// Determine final decision
|
||||
let decision = 0;
|
||||
if (normalizedSignal > 0.3) decision = 1;
|
||||
else if (normalizedSignal < -0.3) decision = -1;
|
||||
|
||||
return {
|
||||
decision,
|
||||
confidence: avgConfidence * Math.abs(normalizedSignal),
|
||||
normalizedSignal,
|
||||
quorum,
|
||||
reason: `weighted_vote=${normalizedSignal.toFixed(3)}, quorum=${(quorum * 100).toFixed(0)}%`
|
||||
};
|
||||
}
|
||||
|
||||
// Byzantine fault tolerant consensus (simplified)
|
||||
byzantineConsensus(signals) {
|
||||
// In BFT, we need 2f+1 agreeing votes to tolerate f faulty nodes
|
||||
const activeSignals = signals.filter(s => s.signal !== 0);
|
||||
const n = activeSignals.length;
|
||||
const f = Math.floor((n - 1) / 3); // Max faulty nodes
|
||||
const requiredAgreement = 2 * f + 1;
|
||||
|
||||
const votes = { long: 0, short: 0, neutral: 0 };
|
||||
for (const signal of signals) {
|
||||
if (signal.signal > 0) votes.long++;
|
||||
else if (signal.signal < 0) votes.short++;
|
||||
else votes.neutral++;
|
||||
}
|
||||
|
||||
let decision = 0;
|
||||
let confidence = 0;
|
||||
|
||||
if (votes.long >= requiredAgreement) {
|
||||
decision = 1;
|
||||
confidence = votes.long / n;
|
||||
} else if (votes.short >= requiredAgreement) {
|
||||
decision = -1;
|
||||
confidence = votes.short / n;
|
||||
}
|
||||
|
||||
return {
|
||||
decision,
|
||||
confidence,
|
||||
votes,
|
||||
requiredAgreement,
|
||||
reason: `BFT: L=${votes.long}, S=${votes.short}, N=${votes.neutral}, need=${requiredAgreement}`
|
||||
};
|
||||
}
|
||||
|
||||
// Main consensus method
|
||||
reachConsensus(signals) {
|
||||
let consensus;
|
||||
|
||||
switch (this.config.consensus.method) {
|
||||
case 'byzantine':
|
||||
consensus = this.byzantineConsensus(signals);
|
||||
break;
|
||||
case 'weighted_vote':
|
||||
default:
|
||||
consensus = this.weightedVoteConsensus(signals);
|
||||
}
|
||||
|
||||
// Apply minimum confidence threshold
|
||||
if (consensus.confidence < this.config.consensus.minConfidence) {
|
||||
consensus.decision = 0;
|
||||
consensus.reason += ` (confidence ${(consensus.confidence * 100).toFixed(0)}% < ${(this.config.consensus.minConfidence * 100).toFixed(0)}%)`;
|
||||
}
|
||||
|
||||
// Update pheromone trails
|
||||
this.updatePheromones(consensus);
|
||||
|
||||
this.consensusHistory.push({
|
||||
timestamp: Date.now(),
|
||||
consensus,
|
||||
signalCount: signals.length
|
||||
});
|
||||
|
||||
// Bound consensus history to prevent memory leak
|
||||
if (this.consensusHistory.length > 1000) {
|
||||
this.consensusHistory = this.consensusHistory.slice(-500);
|
||||
}
|
||||
|
||||
return consensus;
|
||||
}
|
||||
|
||||
// Pheromone-based signal reinforcement
|
||||
updatePheromones(consensus) {
|
||||
const now = Date.now();
|
||||
|
||||
// Decay existing pheromones
|
||||
for (const [key, trail] of this.pheromoneTrails) {
|
||||
const age = now - trail.timestamp;
|
||||
trail.strength *= Math.pow(this.config.pheromone.decayRate, age / 1000);
|
||||
|
||||
if (trail.strength < 0.01) {
|
||||
this.pheromoneTrails.delete(key);
|
||||
}
|
||||
}
|
||||
|
||||
// Reinforce based on consensus
|
||||
if (consensus.decision !== 0) {
|
||||
const key = consensus.decision > 0 ? 'bullish' : 'bearish';
|
||||
const existing = this.pheromoneTrails.get(key) || { strength: 0, timestamp: now };
|
||||
|
||||
existing.strength = Math.min(1.0,
|
||||
existing.strength + consensus.confidence * this.config.pheromone.reinforcement
|
||||
);
|
||||
existing.timestamp = now;
|
||||
|
||||
this.pheromoneTrails.set(key, existing);
|
||||
}
|
||||
}
|
||||
|
||||
// Learn from trade outcome
|
||||
learnFromOutcome(outcome) {
|
||||
for (const agent of this.agents) {
|
||||
agent.learn(outcome);
|
||||
}
|
||||
}
|
||||
|
||||
// Get swarm statistics
|
||||
getSwarmStats() {
|
||||
const stats = {
|
||||
totalAgents: this.agents.length,
|
||||
byType: {},
|
||||
avgConfidence: 0,
|
||||
totalWins: 0,
|
||||
totalLosses: 0,
|
||||
totalPnL: 0,
|
||||
pheromones: {}
|
||||
};
|
||||
|
||||
for (const agent of this.agents) {
|
||||
if (!stats.byType[agent.type]) {
|
||||
stats.byType[agent.type] = { count: 0, avgConfidence: 0, pnl: 0 };
|
||||
}
|
||||
stats.byType[agent.type].count++;
|
||||
stats.byType[agent.type].avgConfidence += agent.confidence;
|
||||
stats.byType[agent.type].pnl += agent.performance.pnl;
|
||||
stats.avgConfidence += agent.confidence;
|
||||
stats.totalWins += agent.performance.wins;
|
||||
stats.totalLosses += agent.performance.losses;
|
||||
stats.totalPnL += agent.performance.pnl;
|
||||
}
|
||||
|
||||
stats.avgConfidence /= this.agents.length || 1;
|
||||
|
||||
// Use Object.entries for object iteration (stats.byType is an object, not Map)
|
||||
for (const [key, value] of Object.entries(stats.byType)) {
|
||||
stats.byType[key].avgConfidence /= value.count || 1;
|
||||
}
|
||||
|
||||
for (const [key, trail] of this.pheromoneTrails) {
|
||||
stats.pheromones[key] = trail.strength;
|
||||
}
|
||||
|
||||
return stats;
|
||||
}
|
||||
}
|
||||
|
||||
// Generate synthetic market data
|
||||
function generateMarketData(n, seed = 42) {
|
||||
const data = [];
|
||||
let price = 100;
|
||||
|
||||
let rng = seed;
|
||||
const random = () => {
|
||||
rng = (rng * 9301 + 49297) % 233280;
|
||||
return rng / 233280;
|
||||
};
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
const regime = Math.sin(i / 100) > 0 ? 'trend' : 'mean-revert';
|
||||
const volatility = regime === 'trend' ? 0.015 : 0.025;
|
||||
|
||||
let drift = 0;
|
||||
if (regime === 'trend') {
|
||||
drift = 0.0003 * Math.sign(Math.sin(i / 200));
|
||||
}
|
||||
|
||||
const return_ = drift + volatility * (random() + random() - 1);
|
||||
const open = price;
|
||||
price = price * (1 + return_);
|
||||
|
||||
const high = Math.max(open, price) * (1 + random() * 0.005);
|
||||
const low = Math.min(open, price) * (1 - random() * 0.005);
|
||||
const volume = 1000000 * (0.5 + random());
|
||||
|
||||
data.push({
|
||||
timestamp: Date.now() - (n - i) * 60000,
|
||||
open,
|
||||
high,
|
||||
low,
|
||||
close: price,
|
||||
volume,
|
||||
regime
|
||||
});
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('MULTI-AGENT SWARM TRADING COORDINATION');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
|
||||
// 1. Initialize swarm
|
||||
console.log('1. Swarm Initialization:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const coordinator = new SwarmCoordinator(swarmConfig);
|
||||
coordinator.initializeSwarm();
|
||||
|
||||
console.log();
|
||||
console.log(' Agent Distribution:');
|
||||
for (const [type, config] of Object.entries(swarmConfig.agents)) {
|
||||
console.log(` - ${type}: ${config.count} agents (weight: ${(config.weight * 100).toFixed(0)}%)`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 2. Generate market data
|
||||
console.log('2. Market Data Simulation:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const marketData = generateMarketData(500);
|
||||
console.log(` Generated ${marketData.length} candles`);
|
||||
console.log(` Price range: $${Math.min(...marketData.map(d => d.low)).toFixed(2)} - $${Math.max(...marketData.map(d => d.high)).toFixed(2)}`);
|
||||
console.log();
|
||||
|
||||
// 3. Run swarm analysis
|
||||
console.log('3. Swarm Analysis (Rolling Window):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const decisions = [];
|
||||
const lookback = 100;
|
||||
|
||||
for (let i = lookback; i < marketData.length; i += 10) {
|
||||
const window = marketData.slice(i - lookback, i);
|
||||
const signals = coordinator.gatherSignals(window);
|
||||
const consensus = coordinator.reachConsensus(signals);
|
||||
|
||||
decisions.push({
|
||||
index: i,
|
||||
price: marketData[i].close,
|
||||
consensus,
|
||||
signals
|
||||
});
|
||||
|
||||
// Simulate outcome for learning
|
||||
if (i + 10 < marketData.length) {
|
||||
const futureReturn = (marketData[i + 10].close - marketData[i].close) / marketData[i].close;
|
||||
const profitable = consensus.decision * futureReturn > 0;
|
||||
coordinator.learnFromOutcome({
|
||||
profitable,
|
||||
pnl: consensus.decision * futureReturn * 10000 // in bps
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
console.log(` Analyzed ${decisions.length} decision points`);
|
||||
console.log();
|
||||
|
||||
// 4. Decision summary
|
||||
console.log('4. Decision Summary:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const longDecisions = decisions.filter(d => d.consensus.decision === 1).length;
|
||||
const shortDecisions = decisions.filter(d => d.consensus.decision === -1).length;
|
||||
const neutralDecisions = decisions.filter(d => d.consensus.decision === 0).length;
|
||||
|
||||
console.log(` Long signals: ${longDecisions} (${(longDecisions / decisions.length * 100).toFixed(1)}%)`);
|
||||
console.log(` Short signals: ${shortDecisions} (${(shortDecisions / decisions.length * 100).toFixed(1)}%)`);
|
||||
console.log(` Neutral: ${neutralDecisions} (${(neutralDecisions / decisions.length * 100).toFixed(1)}%)`);
|
||||
console.log();
|
||||
|
||||
// 5. Sample decisions
|
||||
console.log('5. Sample Decisions (Last 5):');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Index │ Price │ Decision │ Confidence │ Reason');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const lastDecisions = decisions.slice(-5);
|
||||
for (const d of lastDecisions) {
|
||||
const decision = d.consensus.decision === 1 ? 'LONG ' : d.consensus.decision === -1 ? 'SHORT' : 'HOLD ';
|
||||
const conf = (d.consensus.confidence * 100).toFixed(0);
|
||||
console.log(` ${String(d.index).padStart(5)} │ $${d.price.toFixed(2).padStart(6)} │ ${decision} │ ${conf.padStart(6)}% │ ${d.consensus.reason}`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 6. Agent performance
|
||||
console.log('6. Swarm Performance:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const stats = coordinator.getSwarmStats();
|
||||
console.log(` Total P&L: ${stats.totalPnL.toFixed(0)} bps`);
|
||||
console.log(` Win/Loss: ${stats.totalWins}/${stats.totalLosses}`);
|
||||
console.log(` Win Rate: ${((stats.totalWins / (stats.totalWins + stats.totalLosses)) * 100).toFixed(1)}%`);
|
||||
console.log(` Avg Confidence: ${(stats.avgConfidence * 100).toFixed(1)}%`);
|
||||
console.log();
|
||||
|
||||
console.log(' Performance by Agent Type:');
|
||||
for (const [type, data] of Object.entries(stats.byType)) {
|
||||
console.log(` - ${type.padEnd(15)} P&L: ${data.pnl.toFixed(0).padStart(6)} bps`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 7. Pheromone state
|
||||
console.log('7. Pheromone Trails (Signal Strength):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
for (const [direction, strength] of Object.entries(stats.pheromones)) {
|
||||
const bar = '█'.repeat(Math.floor(strength * 40));
|
||||
console.log(` ${direction.padEnd(10)} ${'['.padEnd(1)}${bar.padEnd(40)}] ${(strength * 100).toFixed(1)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 8. Consensus visualization
|
||||
console.log('8. Consensus Timeline (Last 20 decisions):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const timeline = decisions.slice(-20);
|
||||
let timelineStr = ' ';
|
||||
for (const d of timeline) {
|
||||
if (d.consensus.decision === 1) timelineStr += '▲';
|
||||
else if (d.consensus.decision === -1) timelineStr += '▼';
|
||||
else timelineStr += '─';
|
||||
}
|
||||
console.log(timelineStr);
|
||||
console.log(' ▲=Long ▼=Short ─=Hold');
|
||||
console.log();
|
||||
|
||||
console.log('═'.repeat(70));
|
||||
console.log('Multi-agent swarm analysis completed');
|
||||
console.log('═'.repeat(70));
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
831
examples/neural-trader/exotic/quantum-portfolio-optimization.js
Normal file
831
examples/neural-trader/exotic/quantum-portfolio-optimization.js
Normal file
@@ -0,0 +1,831 @@
|
||||
/**
|
||||
* Quantum-Inspired Portfolio Optimization
|
||||
*
|
||||
* EXOTIC: Quantum annealing and QAOA for portfolio selection
|
||||
*
|
||||
* Uses @neural-trader/portfolio with RuVector for:
|
||||
* - Quantum Approximate Optimization Algorithm (QAOA) simulation
|
||||
* - Simulated quantum annealing for combinatorial optimization
|
||||
* - Qubit encoding of portfolio weights
|
||||
* - Quantum interference for exploring solution space
|
||||
*
|
||||
* Classical simulation of quantum concepts for optimization
|
||||
* problems that are NP-hard classically.
|
||||
*/
|
||||
|
||||
// Quantum optimization configuration
|
||||
const quantumConfig = {
|
||||
// QAOA parameters
|
||||
qaoa: {
|
||||
layers: 3, // Number of QAOA layers (p)
|
||||
shots: 1000, // Measurement samples
|
||||
optimizer: 'cobyla', // Classical optimizer for angles
|
||||
maxIterations: 100
|
||||
},
|
||||
|
||||
// Annealing parameters
|
||||
annealing: {
|
||||
initialTemp: 100,
|
||||
finalTemp: 0.01,
|
||||
coolingRate: 0.99,
|
||||
sweeps: 1000
|
||||
},
|
||||
|
||||
// Portfolio constraints
|
||||
portfolio: {
|
||||
numAssets: 10,
|
||||
minWeight: 0.0,
|
||||
maxWeight: 0.3,
|
||||
targetReturn: 0.10,
|
||||
riskAversion: 2.0,
|
||||
cardinalityConstraint: 5 // Max assets in portfolio
|
||||
},
|
||||
|
||||
// Qubit encoding
|
||||
encoding: {
|
||||
bitsPerWeight: 4, // Weight precision: 2^4 = 16 levels
|
||||
penaltyWeight: 100 // Constraint violation penalty
|
||||
}
|
||||
};
|
||||
|
||||
// Object pool for Complex numbers (reduces GC pressure)
|
||||
class ComplexPool {
|
||||
constructor(initialSize = 1024) {
|
||||
this.pool = [];
|
||||
this.index = 0;
|
||||
for (let i = 0; i < initialSize; i++) {
|
||||
this.pool.push(new Complex(0, 0));
|
||||
}
|
||||
}
|
||||
|
||||
acquire(real = 0, imag = 0) {
|
||||
if (this.index < this.pool.length) {
|
||||
const c = this.pool[this.index++];
|
||||
c.real = real;
|
||||
c.imag = imag;
|
||||
return c;
|
||||
}
|
||||
// Expand pool if needed
|
||||
const c = new Complex(real, imag);
|
||||
this.pool.push(c);
|
||||
this.index++;
|
||||
return c;
|
||||
}
|
||||
|
||||
reset() {
|
||||
this.index = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Global pool instance for reuse
|
||||
const complexPool = new ComplexPool(4096);
|
||||
|
||||
// Complex number class for quantum states
|
||||
class Complex {
|
||||
constructor(real, imag = 0) {
|
||||
this.real = real;
|
||||
this.imag = imag;
|
||||
}
|
||||
|
||||
add(other) {
|
||||
return new Complex(this.real + other.real, this.imag + other.imag);
|
||||
}
|
||||
|
||||
// In-place add (avoids allocation)
|
||||
addInPlace(other) {
|
||||
this.real += other.real;
|
||||
this.imag += other.imag;
|
||||
return this;
|
||||
}
|
||||
|
||||
multiply(other) {
|
||||
return new Complex(
|
||||
this.real * other.real - this.imag * other.imag,
|
||||
this.real * other.imag + this.imag * other.real
|
||||
);
|
||||
}
|
||||
|
||||
// In-place multiply (avoids allocation)
|
||||
multiplyInPlace(other) {
|
||||
const newReal = this.real * other.real - this.imag * other.imag;
|
||||
const newImag = this.real * other.imag + this.imag * other.real;
|
||||
this.real = newReal;
|
||||
this.imag = newImag;
|
||||
return this;
|
||||
}
|
||||
|
||||
scale(s) {
|
||||
return new Complex(this.real * s, this.imag * s);
|
||||
}
|
||||
|
||||
// In-place scale (avoids allocation)
|
||||
scaleInPlace(s) {
|
||||
this.real *= s;
|
||||
this.imag *= s;
|
||||
return this;
|
||||
}
|
||||
|
||||
magnitude() {
|
||||
return Math.sqrt(this.real * this.real + this.imag * this.imag);
|
||||
}
|
||||
|
||||
magnitudeSq() {
|
||||
return this.real * this.real + this.imag * this.imag;
|
||||
}
|
||||
|
||||
static exp(theta) {
|
||||
return new Complex(Math.cos(theta), Math.sin(theta));
|
||||
}
|
||||
}
|
||||
|
||||
// Quantum State (simplified simulation)
|
||||
class QuantumState {
|
||||
constructor(numQubits) {
|
||||
this.numQubits = numQubits;
|
||||
this.dim = Math.pow(2, numQubits);
|
||||
this.amplitudes = new Array(this.dim).fill(null).map(() => new Complex(0));
|
||||
this.amplitudes[0] = new Complex(1); // Initialize to |0...0⟩
|
||||
}
|
||||
|
||||
// Create uniform superposition (Hadamard on all qubits)
|
||||
hadamardAll() {
|
||||
const newAmps = new Array(this.dim).fill(null).map(() => new Complex(0));
|
||||
const norm = 1 / Math.sqrt(this.dim);
|
||||
|
||||
for (let i = 0; i < this.dim; i++) {
|
||||
newAmps[i] = new Complex(norm);
|
||||
}
|
||||
|
||||
this.amplitudes = newAmps;
|
||||
}
|
||||
|
||||
// Apply cost Hamiltonian phase (problem encoding)
|
||||
applyCostPhase(gamma, costFunction) {
|
||||
for (let i = 0; i < this.dim; i++) {
|
||||
const cost = costFunction(i);
|
||||
const phase = Complex.exp(-gamma * cost);
|
||||
this.amplitudes[i] = this.amplitudes[i].multiply(phase);
|
||||
}
|
||||
}
|
||||
|
||||
// Apply mixer Hamiltonian (exploration)
|
||||
// Implements exp(-i * beta * sum_j X_j) where X_j is Pauli-X on qubit j
|
||||
applyMixerPhase(beta) {
|
||||
const cos = Math.cos(beta);
|
||||
const sin = Math.sin(beta);
|
||||
|
||||
// Apply Rx(2*beta) to each qubit individually
|
||||
// Rx(theta) = cos(theta/2)*I - i*sin(theta/2)*X
|
||||
for (let q = 0; q < this.numQubits; q++) {
|
||||
const newAmps = new Array(this.dim).fill(null).map(() => new Complex(0));
|
||||
|
||||
for (let i = 0; i < this.dim; i++) {
|
||||
const neighbor = i ^ (1 << q); // Flip qubit q
|
||||
|
||||
// |i⟩ -> cos(beta)|i⟩ - i*sin(beta)|neighbor⟩
|
||||
newAmps[i] = newAmps[i].add(this.amplitudes[i].scale(cos));
|
||||
newAmps[i] = newAmps[i].add(
|
||||
new Complex(0, -sin).multiply(this.amplitudes[neighbor])
|
||||
);
|
||||
}
|
||||
|
||||
// Update amplitudes after each qubit rotation
|
||||
for (let i = 0; i < this.dim; i++) {
|
||||
this.amplitudes[i] = newAmps[i];
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize to handle numerical errors
|
||||
let norm = 0;
|
||||
for (const amp of this.amplitudes) {
|
||||
norm += amp.magnitude() ** 2;
|
||||
}
|
||||
norm = Math.sqrt(norm);
|
||||
|
||||
// Guard against division by zero
|
||||
if (norm > 1e-10) {
|
||||
for (let i = 0; i < this.dim; i++) {
|
||||
this.amplitudes[i] = this.amplitudes[i].scale(1 / norm);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Measure (sample from probability distribution)
|
||||
measure() {
|
||||
const probabilities = this.amplitudes.map(a => a.magnitude() ** 2);
|
||||
|
||||
// Normalize probabilities with guard against zero total
|
||||
const total = probabilities.reduce((a, b) => a + b, 0);
|
||||
if (total < 1e-10) {
|
||||
// Fallback to uniform distribution
|
||||
return Math.floor(Math.random() * this.dim);
|
||||
}
|
||||
const normalized = probabilities.map(p => p / total);
|
||||
|
||||
// Sample
|
||||
const r = Math.random();
|
||||
let cumulative = 0;
|
||||
|
||||
for (let i = 0; i < this.dim; i++) {
|
||||
cumulative += normalized[i];
|
||||
if (r < cumulative) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return this.dim - 1;
|
||||
}
|
||||
|
||||
// Get probability distribution
|
||||
getProbabilities() {
|
||||
const probs = this.amplitudes.map(a => a.magnitude() ** 2);
|
||||
const total = probs.reduce((a, b) => a + b, 0);
|
||||
return probs.map(p => p / total);
|
||||
}
|
||||
}
|
||||
|
||||
// QAOA Optimizer
|
||||
class QAOAOptimizer {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.bestSolution = null;
|
||||
this.bestCost = Infinity;
|
||||
this.history = [];
|
||||
}
|
||||
|
||||
// Define cost function (portfolio objective)
|
||||
createCostFunction(expectedReturns, covarianceMatrix, riskAversion) {
|
||||
return (bitstring) => {
|
||||
const weights = this.decodeWeights(bitstring);
|
||||
|
||||
// Expected return
|
||||
const expectedReturn = weights.reduce((sum, w, i) => sum + w * expectedReturns[i], 0);
|
||||
|
||||
// Portfolio variance
|
||||
let variance = 0;
|
||||
for (let i = 0; i < weights.length; i++) {
|
||||
for (let j = 0; j < weights.length; j++) {
|
||||
variance += weights[i] * weights[j] * covarianceMatrix[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
// Mean-variance objective (maximize return, minimize variance)
|
||||
// Cost = -return + riskAversion * variance
|
||||
let cost = -expectedReturn + riskAversion * variance;
|
||||
|
||||
// Penalty for constraint violations
|
||||
const totalWeight = weights.reduce((a, b) => a + b, 0);
|
||||
if (Math.abs(totalWeight - 1.0) > 0.1) {
|
||||
cost += this.config.encoding.penaltyWeight * (totalWeight - 1.0) ** 2;
|
||||
}
|
||||
|
||||
// Cardinality constraint penalty
|
||||
const numAssets = weights.filter(w => w > 0.01).length;
|
||||
if (numAssets > this.config.portfolio.cardinalityConstraint) {
|
||||
cost += this.config.encoding.penaltyWeight * (numAssets - this.config.portfolio.cardinalityConstraint);
|
||||
}
|
||||
|
||||
return cost;
|
||||
};
|
||||
}
|
||||
|
||||
// Decode bitstring to portfolio weights
|
||||
decodeWeights(bitstring) {
|
||||
const numAssets = this.config.portfolio.numAssets;
|
||||
const bitsPerWeight = this.config.encoding.bitsPerWeight;
|
||||
const maxLevel = Math.pow(2, bitsPerWeight) - 1;
|
||||
|
||||
const weights = [];
|
||||
|
||||
for (let i = 0; i < numAssets; i++) {
|
||||
let value = 0;
|
||||
for (let b = 0; b < bitsPerWeight; b++) {
|
||||
const bitIndex = i * bitsPerWeight + b;
|
||||
if (bitstring & (1 << bitIndex)) {
|
||||
value += Math.pow(2, b);
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize to weight range
|
||||
const weight = (value / maxLevel) * this.config.portfolio.maxWeight;
|
||||
weights.push(weight);
|
||||
}
|
||||
|
||||
// Normalize to sum to 1
|
||||
const total = weights.reduce((a, b) => a + b, 0);
|
||||
if (total > 0) {
|
||||
return weights.map(w => w / total);
|
||||
}
|
||||
|
||||
return weights;
|
||||
}
|
||||
|
||||
// Run QAOA
|
||||
runQAOA(expectedReturns, covarianceMatrix) {
|
||||
const numQubits = this.config.portfolio.numAssets * this.config.encoding.bitsPerWeight;
|
||||
const costFunction = this.createCostFunction(
|
||||
expectedReturns,
|
||||
covarianceMatrix,
|
||||
this.config.portfolio.riskAversion
|
||||
);
|
||||
|
||||
// Initialize angles
|
||||
let gammas = new Array(this.config.qaoa.layers).fill(0.5);
|
||||
let betas = new Array(this.config.qaoa.layers).fill(0.3);
|
||||
|
||||
// Classical optimization loop (simplified gradient-free)
|
||||
for (let iter = 0; iter < this.config.qaoa.maxIterations; iter++) {
|
||||
const result = this.evaluateQAOA(numQubits, gammas, betas, costFunction);
|
||||
|
||||
if (result.avgCost < this.bestCost) {
|
||||
this.bestCost = result.avgCost;
|
||||
this.bestSolution = result.bestBitstring;
|
||||
}
|
||||
|
||||
this.history.push({
|
||||
iteration: iter,
|
||||
avgCost: result.avgCost,
|
||||
bestCost: this.bestCost
|
||||
});
|
||||
|
||||
// Simple parameter update (gradient-free)
|
||||
for (let l = 0; l < this.config.qaoa.layers; l++) {
|
||||
gammas[l] += (Math.random() - 0.5) * 0.1 * (1 - iter / this.config.qaoa.maxIterations);
|
||||
betas[l] += (Math.random() - 0.5) * 0.1 * (1 - iter / this.config.qaoa.maxIterations);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
bestBitstring: this.bestSolution,
|
||||
bestWeights: this.decodeWeights(this.bestSolution),
|
||||
bestCost: this.bestCost,
|
||||
history: this.history
|
||||
};
|
||||
}
|
||||
|
||||
// Evaluate QAOA for given angles
|
||||
evaluateQAOA(numQubits, gammas, betas, costFunction) {
|
||||
// Use smaller qubit count for simulation
|
||||
const effectiveQubits = Math.min(numQubits, 12);
|
||||
|
||||
const state = new QuantumState(effectiveQubits);
|
||||
state.hadamardAll();
|
||||
|
||||
// Apply QAOA layers
|
||||
for (let l = 0; l < this.config.qaoa.layers; l++) {
|
||||
state.applyCostPhase(gammas[l], costFunction);
|
||||
state.applyMixerPhase(betas[l]);
|
||||
}
|
||||
|
||||
// Sample solutions
|
||||
let totalCost = 0;
|
||||
let bestCost = Infinity;
|
||||
let bestBitstring = 0;
|
||||
|
||||
for (let shot = 0; shot < this.config.qaoa.shots; shot++) {
|
||||
const measured = state.measure();
|
||||
const cost = costFunction(measured);
|
||||
totalCost += cost;
|
||||
|
||||
if (cost < bestCost) {
|
||||
bestCost = cost;
|
||||
bestBitstring = measured;
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
avgCost: totalCost / this.config.qaoa.shots,
|
||||
bestCost,
|
||||
bestBitstring
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Simulated Quantum Annealing
|
||||
class QuantumAnnealer {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.bestSolution = null;
|
||||
this.bestEnergy = Infinity;
|
||||
this.history = [];
|
||||
}
|
||||
|
||||
// QUBO formulation for portfolio optimization
|
||||
createQUBOMatrix(expectedReturns, covarianceMatrix, riskAversion) {
|
||||
const n = expectedReturns.length;
|
||||
const Q = Array(n).fill(null).map(() => Array(n).fill(0));
|
||||
|
||||
// Linear terms (returns)
|
||||
for (let i = 0; i < n; i++) {
|
||||
Q[i][i] = -expectedReturns[i];
|
||||
}
|
||||
|
||||
// Quadratic terms (covariance)
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (let j = 0; j < n; j++) {
|
||||
Q[i][j] += riskAversion * covarianceMatrix[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
return Q;
|
||||
}
|
||||
|
||||
// Calculate QUBO energy
|
||||
calculateEnergy(Q, solution) {
|
||||
let energy = 0;
|
||||
const n = Q.length;
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (let j = 0; j < n; j++) {
|
||||
energy += Q[i][j] * solution[i] * solution[j];
|
||||
}
|
||||
}
|
||||
|
||||
// Constraint: sum of weights should be close to 1
|
||||
const totalWeight = solution.reduce((a, b) => a + b, 0);
|
||||
const constraint = this.config.encoding.penaltyWeight * (totalWeight / n - 1) ** 2;
|
||||
|
||||
return energy + constraint;
|
||||
}
|
||||
|
||||
// Run simulated quantum annealing
|
||||
runAnnealing(expectedReturns, covarianceMatrix) {
|
||||
const Q = this.createQUBOMatrix(
|
||||
expectedReturns,
|
||||
covarianceMatrix,
|
||||
this.config.portfolio.riskAversion
|
||||
);
|
||||
const n = expectedReturns.length;
|
||||
|
||||
// Initialize random binary solution
|
||||
let solution = Array(n).fill(0).map(() => Math.random() < 0.5 ? 1 : 0);
|
||||
let energy = this.calculateEnergy(Q, solution);
|
||||
|
||||
this.bestSolution = [...solution];
|
||||
this.bestEnergy = energy;
|
||||
|
||||
let temp = this.config.annealing.initialTemp;
|
||||
|
||||
for (let sweep = 0; sweep < this.config.annealing.sweeps; sweep++) {
|
||||
// Quantum tunneling probability (higher at low temps in quantum annealing)
|
||||
const tunnelProb = Math.exp(-sweep / this.config.annealing.sweeps);
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
// Propose flip
|
||||
const newSolution = [...solution];
|
||||
newSolution[i] = 1 - newSolution[i];
|
||||
|
||||
// Also consider tunneling (flip multiple bits)
|
||||
if (Math.random() < tunnelProb * 0.1) {
|
||||
const j = Math.floor(Math.random() * n);
|
||||
if (j !== i) newSolution[j] = 1 - newSolution[j];
|
||||
}
|
||||
|
||||
const newEnergy = this.calculateEnergy(Q, newSolution);
|
||||
const deltaE = newEnergy - energy;
|
||||
|
||||
// Metropolis-Hastings with quantum tunneling
|
||||
if (deltaE < 0 || Math.random() < Math.exp(-deltaE / temp) + tunnelProb * 0.01) {
|
||||
solution = newSolution;
|
||||
energy = newEnergy;
|
||||
|
||||
if (energy < this.bestEnergy) {
|
||||
this.bestSolution = [...solution];
|
||||
this.bestEnergy = energy;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
temp *= this.config.annealing.coolingRate;
|
||||
|
||||
if (sweep % 100 === 0) {
|
||||
this.history.push({
|
||||
sweep,
|
||||
temperature: temp,
|
||||
energy: energy,
|
||||
bestEnergy: this.bestEnergy
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Convert binary to weights
|
||||
const weights = this.bestSolution.map(b => b / this.bestSolution.reduce((a, b) => a + b, 1));
|
||||
|
||||
return {
|
||||
bestSolution: this.bestSolution,
|
||||
bestWeights: weights,
|
||||
bestEnergy: this.bestEnergy,
|
||||
history: this.history
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Classical portfolio optimizer for comparison
|
||||
class ClassicalOptimizer {
|
||||
optimize(expectedReturns, covarianceMatrix, riskAversion) {
|
||||
const n = expectedReturns.length;
|
||||
|
||||
// Simple gradient descent on Markowitz objective
|
||||
let weights = new Array(n).fill(1 / n);
|
||||
const lr = 0.01;
|
||||
|
||||
for (let iter = 0; iter < 1000; iter++) {
|
||||
// Calculate gradient
|
||||
const gradients = new Array(n).fill(0);
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
// d/dw_i of (-return + riskAversion * variance)
|
||||
gradients[i] = -expectedReturns[i];
|
||||
|
||||
for (let j = 0; j < n; j++) {
|
||||
gradients[i] += 2 * riskAversion * covarianceMatrix[i][j] * weights[j];
|
||||
}
|
||||
}
|
||||
|
||||
// Update weights
|
||||
for (let i = 0; i < n; i++) {
|
||||
weights[i] -= lr * gradients[i];
|
||||
weights[i] = Math.max(0, weights[i]); // Non-negative
|
||||
}
|
||||
|
||||
// Normalize
|
||||
const total = weights.reduce((a, b) => a + b, 0);
|
||||
weights = weights.map(w => w / total);
|
||||
}
|
||||
|
||||
// Calculate final metrics
|
||||
const expectedReturn = weights.reduce((sum, w, i) => sum + w * expectedReturns[i], 0);
|
||||
let variance = 0;
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (let j = 0; j < n; j++) {
|
||||
variance += weights[i] * weights[j] * covarianceMatrix[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
weights,
|
||||
expectedReturn,
|
||||
variance,
|
||||
sharpe: expectedReturn / Math.sqrt(variance)
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Generate synthetic market data
|
||||
function generateMarketData(numAssets, seed = 42) {
|
||||
let rng = seed;
|
||||
const random = () => {
|
||||
rng = (rng * 9301 + 49297) % 233280;
|
||||
return rng / 233280;
|
||||
};
|
||||
|
||||
const assetNames = ['AAPL', 'MSFT', 'GOOGL', 'AMZN', 'NVDA', 'JPM', 'BAC', 'XOM', 'JNJ', 'WMT'];
|
||||
|
||||
// Generate expected returns (5-15% annualized)
|
||||
const expectedReturns = [];
|
||||
for (let i = 0; i < numAssets; i++) {
|
||||
expectedReturns.push(0.05 + random() * 0.10);
|
||||
}
|
||||
|
||||
// Generate covariance matrix (positive semi-definite)
|
||||
const volatilities = [];
|
||||
for (let i = 0; i < numAssets; i++) {
|
||||
volatilities.push(0.15 + random() * 0.20); // 15-35% vol
|
||||
}
|
||||
|
||||
// Correlation matrix with sector structure
|
||||
const correlations = Array(numAssets).fill(null).map(() => Array(numAssets).fill(0));
|
||||
for (let i = 0; i < numAssets; i++) {
|
||||
for (let j = 0; j < numAssets; j++) {
|
||||
if (i === j) {
|
||||
correlations[i][j] = 1;
|
||||
} else {
|
||||
// Higher correlation within "sectors"
|
||||
const sameSector = Math.floor(i / 3) === Math.floor(j / 3);
|
||||
correlations[i][j] = sameSector ? 0.5 + random() * 0.3 : 0.2 + random() * 0.3;
|
||||
correlations[j][i] = correlations[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Covariance = correlation * vol_i * vol_j
|
||||
const covarianceMatrix = Array(numAssets).fill(null).map(() => Array(numAssets).fill(0));
|
||||
for (let i = 0; i < numAssets; i++) {
|
||||
for (let j = 0; j < numAssets; j++) {
|
||||
covarianceMatrix[i][j] = correlations[i][j] * volatilities[i] * volatilities[j];
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
assetNames: assetNames.slice(0, numAssets),
|
||||
expectedReturns,
|
||||
volatilities,
|
||||
correlations,
|
||||
covarianceMatrix
|
||||
};
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('QUANTUM-INSPIRED PORTFOLIO OPTIMIZATION');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
|
||||
// 1. Generate market data
|
||||
console.log('1. Market Data Generation:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const numAssets = quantumConfig.portfolio.numAssets;
|
||||
const marketData = generateMarketData(numAssets);
|
||||
|
||||
console.log(` Assets: ${numAssets}`);
|
||||
console.log(` Risk aversion: ${quantumConfig.portfolio.riskAversion}`);
|
||||
console.log(` Max weight: ${(quantumConfig.portfolio.maxWeight * 100).toFixed(0)}%`);
|
||||
console.log(` Cardinality: Max ${quantumConfig.portfolio.cardinalityConstraint} assets`);
|
||||
console.log();
|
||||
|
||||
console.log(' Asset Characteristics:');
|
||||
console.log(' Asset │ E[R] │ Vol │');
|
||||
console.log('─'.repeat(70));
|
||||
for (let i = 0; i < Math.min(5, numAssets); i++) {
|
||||
console.log(` ${marketData.assetNames[i].padEnd(5)} │ ${(marketData.expectedReturns[i] * 100).toFixed(1)}% │ ${(marketData.volatilities[i] * 100).toFixed(1)}% │`);
|
||||
}
|
||||
console.log(` ... (${numAssets - 5} more assets)`);
|
||||
console.log();
|
||||
|
||||
// 2. Classical optimization (baseline)
|
||||
console.log('2. Classical Optimization (Baseline):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const classical = new ClassicalOptimizer();
|
||||
const classicalResult = classical.optimize(
|
||||
marketData.expectedReturns,
|
||||
marketData.covarianceMatrix,
|
||||
quantumConfig.portfolio.riskAversion
|
||||
);
|
||||
|
||||
console.log(` Expected Return: ${(classicalResult.expectedReturn * 100).toFixed(2)}%`);
|
||||
console.log(` Portfolio Vol: ${(Math.sqrt(classicalResult.variance) * 100).toFixed(2)}%`);
|
||||
console.log(` Sharpe Ratio: ${classicalResult.sharpe.toFixed(3)}`);
|
||||
console.log();
|
||||
|
||||
console.log(' Weights:');
|
||||
const sortedClassical = classicalResult.weights
|
||||
.map((w, i) => ({ name: marketData.assetNames[i], weight: w }))
|
||||
.sort((a, b) => b.weight - a.weight)
|
||||
.slice(0, 5);
|
||||
|
||||
for (const { name, weight } of sortedClassical) {
|
||||
const bar = '█'.repeat(Math.floor(weight * 40));
|
||||
console.log(` ${name.padEnd(5)} ${bar.padEnd(40)} ${(weight * 100).toFixed(1)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 3. Quantum Annealing
|
||||
console.log('3. Quantum Annealing Optimization:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const annealer = new QuantumAnnealer(quantumConfig);
|
||||
const annealingResult = annealer.runAnnealing(
|
||||
marketData.expectedReturns,
|
||||
marketData.covarianceMatrix
|
||||
);
|
||||
|
||||
// Calculate metrics for annealing result
|
||||
const annealingReturn = annealingResult.bestWeights.reduce(
|
||||
(sum, w, i) => sum + w * marketData.expectedReturns[i], 0
|
||||
);
|
||||
let annealingVariance = 0;
|
||||
for (let i = 0; i < numAssets; i++) {
|
||||
for (let j = 0; j < numAssets; j++) {
|
||||
annealingVariance += annealingResult.bestWeights[i] * annealingResult.bestWeights[j] *
|
||||
marketData.covarianceMatrix[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
console.log(` Expected Return: ${(annealingReturn * 100).toFixed(2)}%`);
|
||||
console.log(` Portfolio Vol: ${(Math.sqrt(annealingVariance) * 100).toFixed(2)}%`);
|
||||
console.log(` Sharpe Ratio: ${(annealingReturn / Math.sqrt(annealingVariance)).toFixed(3)}`);
|
||||
console.log(` Final Energy: ${annealingResult.bestEnergy.toFixed(4)}`);
|
||||
console.log();
|
||||
|
||||
console.log(' Binary Solution:');
|
||||
console.log(` ${annealingResult.bestSolution.join('')}`);
|
||||
console.log();
|
||||
|
||||
console.log(' Weights:');
|
||||
const sortedAnnealing = annealingResult.bestWeights
|
||||
.map((w, i) => ({ name: marketData.assetNames[i], weight: w }))
|
||||
.sort((a, b) => b.weight - a.weight)
|
||||
.filter(x => x.weight > 0.01)
|
||||
.slice(0, 5);
|
||||
|
||||
for (const { name, weight } of sortedAnnealing) {
|
||||
const bar = '█'.repeat(Math.floor(weight * 40));
|
||||
console.log(` ${name.padEnd(5)} ${bar.padEnd(40)} ${(weight * 100).toFixed(1)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 4. QAOA (simplified)
|
||||
console.log('4. QAOA Optimization (Simplified):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
// Use smaller problem for QAOA simulation
|
||||
const qaoaConfig = { ...quantumConfig, portfolio: { ...quantumConfig.portfolio, numAssets: 4 } };
|
||||
const smallMarketData = generateMarketData(4);
|
||||
|
||||
const qaoa = new QAOAOptimizer(qaoaConfig);
|
||||
const qaoaResult = qaoa.runQAOA(
|
||||
smallMarketData.expectedReturns,
|
||||
smallMarketData.covarianceMatrix
|
||||
);
|
||||
|
||||
console.log(` QAOA Layers (p): ${quantumConfig.qaoa.layers}`);
|
||||
console.log(` Measurement shots: ${quantumConfig.qaoa.shots}`);
|
||||
console.log(` Best Cost: ${qaoaResult.bestCost.toFixed(4)}`);
|
||||
console.log();
|
||||
|
||||
console.log(' QAOA Weights (4-asset subset):');
|
||||
for (let i = 0; i < qaoaResult.bestWeights.length; i++) {
|
||||
const w = qaoaResult.bestWeights[i];
|
||||
const bar = '█'.repeat(Math.floor(w * 40));
|
||||
console.log(` Asset ${i + 1} ${bar.padEnd(40)} ${(w * 100).toFixed(1)}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 5. Annealing convergence
|
||||
console.log('5. Annealing Convergence:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(' Energy vs Temperature:');
|
||||
let curve = ' ';
|
||||
const energies = annealingResult.history.map(h => h.energy);
|
||||
const minE = Math.min(...energies);
|
||||
const maxE = Math.max(...energies);
|
||||
const rangeE = maxE - minE || 1;
|
||||
|
||||
for (const h of annealingResult.history.slice(-40)) {
|
||||
const norm = 1 - (h.energy - minE) / rangeE;
|
||||
if (norm < 0.25) curve += '▁';
|
||||
else if (norm < 0.5) curve += '▃';
|
||||
else if (norm < 0.75) curve += '▅';
|
||||
else curve += '█';
|
||||
}
|
||||
console.log(curve);
|
||||
console.log(` Start Energy: ${maxE.toFixed(3)} Final: ${minE.toFixed(3)}`);
|
||||
console.log();
|
||||
|
||||
// 6. Quantum advantage discussion
|
||||
console.log('6. Quantum Advantage Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(' Problem Complexity:');
|
||||
console.log(` - Classical: O(n³) for Markowitz with constraints`);
|
||||
console.log(` - Quantum: O(√n) potential speedup via Grover`);
|
||||
console.log();
|
||||
|
||||
console.log(' This simulation demonstrates:');
|
||||
console.log(' - QUBO formulation of portfolio optimization');
|
||||
console.log(' - Quantum annealing energy landscape exploration');
|
||||
console.log(' - QAOA variational quantum-classical hybrid');
|
||||
console.log();
|
||||
|
||||
console.log(' Real quantum hardware benefits:');
|
||||
console.log(' - Combinatorial (cardinality) constraints');
|
||||
console.log(' - Large-scale problems (1000+ assets)');
|
||||
console.log(' - Non-convex objectives');
|
||||
console.log();
|
||||
|
||||
// 7. Comparison summary
|
||||
console.log('7. Method Comparison:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const classicalSharpe = classicalResult.sharpe;
|
||||
const annealingSharpe = annealingReturn / Math.sqrt(annealingVariance);
|
||||
|
||||
console.log(' Method │ Return │ Vol │ Sharpe │ Assets');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(` Classical │ ${(classicalResult.expectedReturn * 100).toFixed(1)}% │ ${(Math.sqrt(classicalResult.variance) * 100).toFixed(1)}% │ ${classicalSharpe.toFixed(3)} │ ${classicalResult.weights.filter(w => w > 0.01).length}`);
|
||||
console.log(` Quantum Anneal │ ${(annealingReturn * 100).toFixed(1)}% │ ${(Math.sqrt(annealingVariance) * 100).toFixed(1)}% │ ${annealingSharpe.toFixed(3)} │ ${annealingResult.bestWeights.filter(w => w > 0.01).length}`);
|
||||
console.log();
|
||||
|
||||
// 8. RuVector integration
|
||||
console.log('8. RuVector Vector Storage:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' Portfolio weight vectors can be stored:');
|
||||
console.log();
|
||||
console.log(` Classical weights: [${classicalResult.weights.slice(0, 4).map(w => w.toFixed(3)).join(', ')}, ...]`);
|
||||
console.log(` Quantum weights: [${annealingResult.bestWeights.slice(0, 4).map(w => w.toFixed(3)).join(', ')}, ...]`);
|
||||
console.log();
|
||||
console.log(' Use cases:');
|
||||
console.log(' - Similarity search for portfolio allocation patterns');
|
||||
console.log(' - Regime-based portfolio retrieval');
|
||||
console.log(' - Factor exposure analysis via vector operations');
|
||||
console.log();
|
||||
|
||||
console.log('═'.repeat(70));
|
||||
console.log('Quantum-inspired portfolio optimization completed');
|
||||
console.log('═'.repeat(70));
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
902
examples/neural-trader/exotic/reinforcement-learning-agent.js
Normal file
902
examples/neural-trader/exotic/reinforcement-learning-agent.js
Normal file
@@ -0,0 +1,902 @@
|
||||
/**
|
||||
* Reinforcement Learning Trading Agent
|
||||
*
|
||||
* EXOTIC: Deep Q-Learning for autonomous trading
|
||||
*
|
||||
* Uses @neural-trader/neural with RuVector for:
|
||||
* - Deep Q-Network (DQN) for action selection
|
||||
* - Experience replay with vector similarity
|
||||
* - Epsilon-greedy exploration
|
||||
* - Target network for stable learning
|
||||
*
|
||||
* The agent learns optimal trading actions directly from
|
||||
* market experience, without explicit strategy rules.
|
||||
*/
|
||||
|
||||
// RL Configuration
|
||||
const rlConfig = {
|
||||
// Network architecture
|
||||
network: {
|
||||
stateDim: 20, // State vector dimension
|
||||
hiddenLayers: [128, 64, 32],
|
||||
actionSpace: 5 // hold, buy_small, buy_large, sell_small, sell_large
|
||||
},
|
||||
|
||||
// Learning parameters
|
||||
learning: {
|
||||
gamma: 0.99, // Discount factor
|
||||
learningRate: 0.001,
|
||||
batchSize: 32,
|
||||
targetUpdateFreq: 100, // Steps between target network updates
|
||||
replayBufferSize: 10000
|
||||
},
|
||||
|
||||
// Exploration
|
||||
exploration: {
|
||||
epsilonStart: 1.0,
|
||||
epsilonEnd: 0.01,
|
||||
epsilonDecay: 0.995
|
||||
},
|
||||
|
||||
// Trading
|
||||
trading: {
|
||||
initialCapital: 100000,
|
||||
maxPosition: 0.5, // Max 50% of capital
|
||||
transactionCost: 0.001, // 10 bps
|
||||
slippage: 0.0005 // 5 bps
|
||||
}
|
||||
};
|
||||
|
||||
// Action definitions
|
||||
const Actions = {
|
||||
HOLD: 0,
|
||||
BUY_SMALL: 1, // 10% of available
|
||||
BUY_LARGE: 2, // 30% of available
|
||||
SELL_SMALL: 3, // 10% of position
|
||||
SELL_LARGE: 4 // 30% of position
|
||||
};
|
||||
|
||||
const ActionNames = ['HOLD', 'BUY_SMALL', 'BUY_LARGE', 'SELL_SMALL', 'SELL_LARGE'];
|
||||
|
||||
// Neural Network Layer
|
||||
class DenseLayer {
|
||||
constructor(inputDim, outputDim, activation = 'relu') {
|
||||
this.inputDim = inputDim;
|
||||
this.outputDim = outputDim;
|
||||
this.activation = activation;
|
||||
|
||||
// Xavier initialization
|
||||
const scale = Math.sqrt(2.0 / (inputDim + outputDim));
|
||||
this.weights = [];
|
||||
for (let i = 0; i < inputDim; i++) {
|
||||
const row = [];
|
||||
for (let j = 0; j < outputDim; j++) {
|
||||
row.push((Math.random() - 0.5) * 2 * scale);
|
||||
}
|
||||
this.weights.push(row);
|
||||
}
|
||||
this.bias = new Array(outputDim).fill(0).map(() => (Math.random() - 0.5) * 0.1);
|
||||
}
|
||||
|
||||
forward(input) {
|
||||
const output = new Array(this.outputDim).fill(0);
|
||||
|
||||
for (let j = 0; j < this.outputDim; j++) {
|
||||
for (let i = 0; i < this.inputDim; i++) {
|
||||
output[j] += input[i] * this.weights[i][j];
|
||||
}
|
||||
output[j] += this.bias[j];
|
||||
|
||||
// Activation
|
||||
if (this.activation === 'relu') {
|
||||
output[j] = Math.max(0, output[j]);
|
||||
}
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
// Simplified gradient update
|
||||
updateWeights(gradients, lr) {
|
||||
for (let i = 0; i < this.inputDim; i++) {
|
||||
for (let j = 0; j < this.outputDim; j++) {
|
||||
this.weights[i][j] -= lr * gradients[i][j];
|
||||
}
|
||||
}
|
||||
for (let j = 0; j < this.outputDim; j++) {
|
||||
this.bias[j] -= lr * gradients.bias[j];
|
||||
}
|
||||
}
|
||||
|
||||
copyFrom(other) {
|
||||
for (let i = 0; i < this.inputDim; i++) {
|
||||
for (let j = 0; j < this.outputDim; j++) {
|
||||
this.weights[i][j] = other.weights[i][j];
|
||||
}
|
||||
}
|
||||
for (let j = 0; j < this.outputDim; j++) {
|
||||
this.bias[j] = other.bias[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Deep Q-Network
|
||||
class DQN {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
|
||||
// Build layers
|
||||
this.layers = [];
|
||||
let prevDim = config.stateDim;
|
||||
|
||||
for (const hiddenDim of config.hiddenLayers) {
|
||||
this.layers.push(new DenseLayer(prevDim, hiddenDim, 'relu'));
|
||||
prevDim = hiddenDim;
|
||||
}
|
||||
|
||||
// Output layer (no activation for Q-values)
|
||||
this.layers.push(new DenseLayer(prevDim, config.actionSpace, 'linear'));
|
||||
}
|
||||
|
||||
forward(state) {
|
||||
let x = state;
|
||||
// Store activations for backpropagation
|
||||
this.activations = [state];
|
||||
for (const layer of this.layers) {
|
||||
x = layer.forward(x);
|
||||
this.activations.push(x);
|
||||
}
|
||||
return x;
|
||||
}
|
||||
|
||||
// Get the activation before the output layer (for gradient computation)
|
||||
getPreOutputActivation() {
|
||||
if (!this.activations || this.activations.length < 2) {
|
||||
return null;
|
||||
}
|
||||
// Return activation just before output layer
|
||||
return this.activations[this.activations.length - 2];
|
||||
}
|
||||
|
||||
copyFrom(other) {
|
||||
for (let i = 0; i < this.layers.length; i++) {
|
||||
this.layers[i].copyFrom(other.layers[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Experience Replay Buffer
|
||||
class ReplayBuffer {
|
||||
constructor(maxSize) {
|
||||
this.maxSize = maxSize;
|
||||
this.buffer = [];
|
||||
this.position = 0;
|
||||
}
|
||||
|
||||
add(experience) {
|
||||
if (this.buffer.length < this.maxSize) {
|
||||
this.buffer.push(experience);
|
||||
} else {
|
||||
this.buffer[this.position] = experience;
|
||||
}
|
||||
this.position = (this.position + 1) % this.maxSize;
|
||||
}
|
||||
|
||||
sample(batchSize) {
|
||||
const samples = [];
|
||||
const indices = new Set();
|
||||
|
||||
while (indices.size < Math.min(batchSize, this.buffer.length)) {
|
||||
indices.add(Math.floor(Math.random() * this.buffer.length));
|
||||
}
|
||||
|
||||
for (const idx of indices) {
|
||||
samples.push(this.buffer[idx]);
|
||||
}
|
||||
|
||||
return samples;
|
||||
}
|
||||
|
||||
size() {
|
||||
return this.buffer.length;
|
||||
}
|
||||
}
|
||||
|
||||
// State Encoder
|
||||
class StateEncoder {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.priceHistory = [];
|
||||
this.returnHistory = [];
|
||||
}
|
||||
|
||||
update(price) {
|
||||
this.priceHistory.push(price);
|
||||
if (this.priceHistory.length > 1) {
|
||||
const ret = (price - this.priceHistory[this.priceHistory.length - 2]) /
|
||||
this.priceHistory[this.priceHistory.length - 2];
|
||||
this.returnHistory.push(ret);
|
||||
}
|
||||
|
||||
// Keep bounded
|
||||
if (this.priceHistory.length > 100) {
|
||||
this.priceHistory.shift();
|
||||
this.returnHistory.shift();
|
||||
}
|
||||
}
|
||||
|
||||
encode(portfolio) {
|
||||
const state = [];
|
||||
|
||||
// Price-based features
|
||||
if (this.returnHistory.length >= 20) {
|
||||
// Recent returns
|
||||
for (let i = 1; i <= 5; i++) {
|
||||
state.push(this.returnHistory[this.returnHistory.length - i] * 10); // Scaled
|
||||
}
|
||||
|
||||
// Return statistics
|
||||
const recent20 = this.returnHistory.slice(-20);
|
||||
const mean = recent20.reduce((a, b) => a + b, 0) / 20;
|
||||
const variance = recent20.reduce((s, r) => s + (r - mean) ** 2, 0) / 20;
|
||||
const volatility = Math.sqrt(variance);
|
||||
|
||||
state.push(mean * 100);
|
||||
state.push(volatility * 100);
|
||||
|
||||
// Momentum
|
||||
const momentum5 = this.returnHistory.slice(-5).reduce((a, b) => a + b, 0);
|
||||
const momentum10 = this.returnHistory.slice(-10).reduce((a, b) => a + b, 0);
|
||||
const momentum20 = this.returnHistory.slice(-20).reduce((a, b) => a + b, 0);
|
||||
|
||||
state.push(momentum5 * 10);
|
||||
state.push(momentum10 * 10);
|
||||
state.push(momentum20 * 10);
|
||||
|
||||
// Price relative to moving averages
|
||||
const currentPrice = this.priceHistory[this.priceHistory.length - 1];
|
||||
const sma5 = this.priceHistory.slice(-5).reduce((a, b) => a + b, 0) / 5;
|
||||
const sma20 = this.priceHistory.slice(-20).reduce((a, b) => a + b, 0) / 20;
|
||||
|
||||
state.push((currentPrice / sma5 - 1) * 10);
|
||||
state.push((currentPrice / sma20 - 1) * 10);
|
||||
|
||||
// Trend direction
|
||||
const trend = this.returnHistory.slice(-10).filter(r => r > 0).length / 10;
|
||||
state.push(trend - 0.5);
|
||||
} else {
|
||||
// Pad with zeros
|
||||
for (let i = 0; i < 13; i++) {
|
||||
state.push(0);
|
||||
}
|
||||
}
|
||||
|
||||
// Portfolio features
|
||||
state.push(portfolio.positionPct - 0.5); // Position as fraction of capital
|
||||
state.push(portfolio.unrealizedPnL / portfolio.capital);
|
||||
state.push(portfolio.realizedPnL / portfolio.capital);
|
||||
state.push(portfolio.drawdown);
|
||||
state.push(portfolio.winRate - 0.5);
|
||||
state.push(portfolio.sharpe / 2);
|
||||
state.push(portfolio.tradeCount / 100);
|
||||
|
||||
// Ensure state dimension
|
||||
while (state.length < this.config.network.stateDim) {
|
||||
state.push(0);
|
||||
}
|
||||
|
||||
return state.slice(0, this.config.network.stateDim);
|
||||
}
|
||||
}
|
||||
|
||||
// Trading Environment
|
||||
class TradingEnvironment {
|
||||
constructor(config, priceData) {
|
||||
this.config = config;
|
||||
this.priceData = priceData;
|
||||
this.reset();
|
||||
}
|
||||
|
||||
reset() {
|
||||
this.currentStep = 50; // Start after warmup
|
||||
this.capital = this.config.trading.initialCapital;
|
||||
this.position = 0;
|
||||
this.avgCost = 0;
|
||||
this.realizedPnL = 0;
|
||||
this.trades = [];
|
||||
this.peakCapital = this.capital;
|
||||
this.returns = [];
|
||||
|
||||
return this.getState();
|
||||
}
|
||||
|
||||
getState() {
|
||||
return {
|
||||
price: this.priceData[this.currentStep].close,
|
||||
capital: this.capital,
|
||||
position: this.position,
|
||||
positionPct: this.position * this.priceData[this.currentStep].close / this.getPortfolioValue(),
|
||||
unrealizedPnL: this.getUnrealizedPnL(),
|
||||
realizedPnL: this.realizedPnL,
|
||||
drawdown: this.getDrawdown(),
|
||||
winRate: this.getWinRate(),
|
||||
sharpe: this.getSharpe(),
|
||||
tradeCount: this.trades.length
|
||||
};
|
||||
}
|
||||
|
||||
getPortfolioValue() {
|
||||
const price = this.priceData[this.currentStep].close;
|
||||
return this.capital + this.position * price;
|
||||
}
|
||||
|
||||
getUnrealizedPnL() {
|
||||
if (this.position === 0) return 0;
|
||||
const price = this.priceData[this.currentStep].close;
|
||||
return this.position * (price - this.avgCost);
|
||||
}
|
||||
|
||||
getDrawdown() {
|
||||
const value = this.getPortfolioValue();
|
||||
this.peakCapital = Math.max(this.peakCapital, value);
|
||||
return (this.peakCapital - value) / this.peakCapital;
|
||||
}
|
||||
|
||||
getWinRate() {
|
||||
const closedTrades = this.trades.filter(t => t.closed);
|
||||
if (closedTrades.length === 0) return 0.5;
|
||||
const wins = closedTrades.filter(t => t.pnl > 0).length;
|
||||
return wins / closedTrades.length;
|
||||
}
|
||||
|
||||
getSharpe() {
|
||||
if (this.returns.length < 10) return 0;
|
||||
const mean = this.returns.reduce((a, b) => a + b, 0) / this.returns.length;
|
||||
const variance = this.returns.reduce((s, r) => s + (r - mean) ** 2, 0) / this.returns.length;
|
||||
if (variance === 0) return 0;
|
||||
return mean / Math.sqrt(variance) * Math.sqrt(252);
|
||||
}
|
||||
|
||||
step(action) {
|
||||
const prevValue = this.getPortfolioValue();
|
||||
const price = this.priceData[this.currentStep].close;
|
||||
|
||||
// Execute action
|
||||
this.executeAction(action, price);
|
||||
|
||||
// Move to next step
|
||||
this.currentStep++;
|
||||
const done = this.currentStep >= this.priceData.length - 1;
|
||||
|
||||
// Calculate reward
|
||||
const newValue = this.getPortfolioValue();
|
||||
const stepReturn = (newValue - prevValue) / prevValue;
|
||||
this.returns.push(stepReturn);
|
||||
// Bound returns array to prevent memory leak
|
||||
if (this.returns.length > 1000) {
|
||||
this.returns = this.returns.slice(-500);
|
||||
}
|
||||
|
||||
// Shape reward
|
||||
let reward = stepReturn * 100; // Scale returns
|
||||
|
||||
// Penalty for excessive trading
|
||||
if (action !== Actions.HOLD) {
|
||||
reward -= 0.1;
|
||||
}
|
||||
|
||||
// Penalty for drawdown
|
||||
const drawdown = this.getDrawdown();
|
||||
if (drawdown > 0.1) {
|
||||
reward -= drawdown * 10;
|
||||
}
|
||||
|
||||
// Bonus for profitable trades
|
||||
const winRate = this.getWinRate();
|
||||
if (winRate > 0.5) {
|
||||
reward += (winRate - 0.5) * 2;
|
||||
}
|
||||
|
||||
return {
|
||||
state: this.getState(),
|
||||
reward,
|
||||
done,
|
||||
info: {
|
||||
portfolioValue: newValue,
|
||||
stepReturn,
|
||||
action: ActionNames[action]
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
executeAction(action, price) {
|
||||
const slippage = this.config.trading.slippage;
|
||||
const cost = this.config.trading.transactionCost;
|
||||
|
||||
switch (action) {
|
||||
case Actions.BUY_SMALL:
|
||||
this.buy(0.1, price * (1 + slippage + cost));
|
||||
break;
|
||||
case Actions.BUY_LARGE:
|
||||
this.buy(0.3, price * (1 + slippage + cost));
|
||||
break;
|
||||
case Actions.SELL_SMALL:
|
||||
this.sell(0.1, price * (1 - slippage - cost));
|
||||
break;
|
||||
case Actions.SELL_LARGE:
|
||||
this.sell(0.3, price * (1 - slippage - cost));
|
||||
break;
|
||||
case Actions.HOLD:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
buy(fraction, price) {
|
||||
const maxBuy = this.capital * this.config.trading.maxPosition;
|
||||
const amount = Math.min(this.capital * fraction, maxBuy);
|
||||
|
||||
if (amount < 100) return; // Min trade size
|
||||
|
||||
const shares = amount / price;
|
||||
const totalCost = this.position * this.avgCost + amount;
|
||||
const totalShares = this.position + shares;
|
||||
|
||||
this.avgCost = totalCost / totalShares;
|
||||
this.position = totalShares;
|
||||
this.capital -= amount;
|
||||
|
||||
this.trades.push({
|
||||
type: 'buy',
|
||||
shares,
|
||||
price,
|
||||
timestamp: this.currentStep,
|
||||
closed: false
|
||||
});
|
||||
}
|
||||
|
||||
sell(fraction, price) {
|
||||
if (this.position <= 0) return;
|
||||
|
||||
const sharesToSell = this.position * fraction;
|
||||
if (sharesToSell < 0.01) return;
|
||||
|
||||
const proceeds = sharesToSell * price;
|
||||
const costBasis = sharesToSell * this.avgCost;
|
||||
const tradePnL = proceeds - costBasis;
|
||||
|
||||
this.position -= sharesToSell;
|
||||
this.capital += proceeds;
|
||||
this.realizedPnL += tradePnL;
|
||||
|
||||
this.trades.push({
|
||||
type: 'sell',
|
||||
shares: sharesToSell,
|
||||
price,
|
||||
pnl: tradePnL,
|
||||
timestamp: this.currentStep,
|
||||
closed: true
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// DQN Agent
|
||||
class DQNAgent {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
|
||||
// Networks
|
||||
this.qNetwork = new DQN(config.network);
|
||||
this.targetNetwork = new DQN(config.network);
|
||||
this.targetNetwork.copyFrom(this.qNetwork);
|
||||
|
||||
// Experience replay
|
||||
this.replayBuffer = new ReplayBuffer(config.learning.replayBufferSize);
|
||||
|
||||
// Exploration
|
||||
this.epsilon = config.exploration.epsilonStart;
|
||||
|
||||
// Training stats
|
||||
this.stepCount = 0;
|
||||
this.episodeCount = 0;
|
||||
this.totalReward = 0;
|
||||
this.losses = [];
|
||||
}
|
||||
|
||||
selectAction(state) {
|
||||
// Epsilon-greedy
|
||||
if (Math.random() < this.epsilon) {
|
||||
return Math.floor(Math.random() * this.config.network.actionSpace);
|
||||
}
|
||||
|
||||
// Greedy action
|
||||
const qValues = this.qNetwork.forward(state);
|
||||
return qValues.indexOf(Math.max(...qValues));
|
||||
}
|
||||
|
||||
train() {
|
||||
if (this.replayBuffer.size() < this.config.learning.batchSize) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const batch = this.replayBuffer.sample(this.config.learning.batchSize);
|
||||
let totalLoss = 0;
|
||||
|
||||
for (const experience of batch) {
|
||||
const { state, action, reward, nextState, done } = experience;
|
||||
|
||||
// Current Q-value
|
||||
const currentQ = this.qNetwork.forward(state);
|
||||
|
||||
// Target Q-value
|
||||
let targetQ;
|
||||
if (done) {
|
||||
targetQ = reward;
|
||||
} else {
|
||||
const nextQ = this.targetNetwork.forward(nextState);
|
||||
targetQ = reward + this.config.learning.gamma * Math.max(...nextQ);
|
||||
}
|
||||
|
||||
// TD error
|
||||
const tdError = targetQ - currentQ[action];
|
||||
totalLoss += tdError ** 2;
|
||||
|
||||
// Simplified update (in production, use proper backprop)
|
||||
this.updateQNetwork(state, action, tdError);
|
||||
}
|
||||
|
||||
this.losses.push(totalLoss / batch.length);
|
||||
return totalLoss / batch.length;
|
||||
}
|
||||
|
||||
updateQNetwork(state, action, tdError) {
|
||||
const lr = this.config.learning.learningRate;
|
||||
|
||||
// Get the actual hidden layer output (activation before output layer)
|
||||
const hiddenOutput = this.qNetwork.getPreOutputActivation();
|
||||
|
||||
if (!hiddenOutput) {
|
||||
// Fallback: run forward pass to get activations
|
||||
this.qNetwork.forward(state);
|
||||
return this.updateQNetwork(state, action, tdError);
|
||||
}
|
||||
|
||||
// Update output layer using actual hidden activations
|
||||
const outputLayer = this.qNetwork.layers[this.qNetwork.layers.length - 1];
|
||||
|
||||
// Gradient for output layer: dL/dW = tdError * hiddenOutput
|
||||
for (let i = 0; i < outputLayer.inputDim; i++) {
|
||||
outputLayer.weights[i][action] += lr * tdError * hiddenOutput[i];
|
||||
}
|
||||
outputLayer.bias[action] += lr * tdError;
|
||||
|
||||
// Simplified backprop through hidden layers (gradient clipping for stability)
|
||||
const maxGrad = 1.0;
|
||||
let delta = tdError * outputLayer.weights.map(row => row[action]);
|
||||
|
||||
for (let l = this.qNetwork.layers.length - 2; l >= 0; l--) {
|
||||
const layer = this.qNetwork.layers[l];
|
||||
const prevActivation = this.qNetwork.activations[l];
|
||||
const currentActivation = this.qNetwork.activations[l + 1];
|
||||
|
||||
// ReLU derivative: 1 if activation > 0, else 0
|
||||
const reluGrad = currentActivation.map(a => a > 0 ? 1 : 0);
|
||||
|
||||
// Apply ReLU gradient
|
||||
delta = delta.map((d, i) => d * (reluGrad[i] || 0));
|
||||
|
||||
// Clip gradients for stability
|
||||
delta = delta.map(d => Math.max(-maxGrad, Math.min(maxGrad, d)));
|
||||
|
||||
// Update weights for this layer
|
||||
for (let i = 0; i < layer.inputDim; i++) {
|
||||
for (let j = 0; j < layer.outputDim; j++) {
|
||||
layer.weights[i][j] += lr * 0.1 * delta[j] * (prevActivation[i] || 0);
|
||||
}
|
||||
}
|
||||
|
||||
// Propagate delta to previous layer
|
||||
if (l > 0) {
|
||||
const newDelta = new Array(layer.inputDim).fill(0);
|
||||
for (let i = 0; i < layer.inputDim; i++) {
|
||||
for (let j = 0; j < layer.outputDim; j++) {
|
||||
newDelta[i] += delta[j] * layer.weights[i][j];
|
||||
}
|
||||
}
|
||||
delta = newDelta;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
updateTargetNetwork() {
|
||||
this.targetNetwork.copyFrom(this.qNetwork);
|
||||
}
|
||||
|
||||
decayEpsilon() {
|
||||
this.epsilon = Math.max(
|
||||
this.config.exploration.epsilonEnd,
|
||||
this.epsilon * this.config.exploration.epsilonDecay
|
||||
);
|
||||
}
|
||||
|
||||
addExperience(state, action, reward, nextState, done) {
|
||||
this.replayBuffer.add({ state, action, reward, nextState, done });
|
||||
this.stepCount++;
|
||||
|
||||
if (this.stepCount % this.config.learning.targetUpdateFreq === 0) {
|
||||
this.updateTargetNetwork();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Generate synthetic price data
|
||||
function generatePriceData(n, seed = 42) {
|
||||
const data = [];
|
||||
let price = 100;
|
||||
|
||||
let rng = seed;
|
||||
const random = () => {
|
||||
rng = (rng * 9301 + 49297) % 233280;
|
||||
return rng / 233280;
|
||||
};
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
// Regime-switching dynamics
|
||||
const regime = Math.floor(i / 100) % 3;
|
||||
let drift = 0, volatility = 0.015;
|
||||
|
||||
if (regime === 0) {
|
||||
drift = 0.001;
|
||||
volatility = 0.012;
|
||||
} else if (regime === 1) {
|
||||
drift = -0.0005;
|
||||
volatility = 0.02;
|
||||
} else {
|
||||
drift = 0;
|
||||
volatility = 0.01;
|
||||
}
|
||||
|
||||
const return_ = drift + volatility * (random() + random() - 1);
|
||||
price = price * (1 + return_);
|
||||
|
||||
data.push({
|
||||
timestamp: i,
|
||||
open: price * (1 - random() * 0.002),
|
||||
high: price * (1 + random() * 0.005),
|
||||
low: price * (1 - random() * 0.005),
|
||||
close: price,
|
||||
volume: 1000000 * (0.5 + random())
|
||||
});
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('═'.repeat(70));
|
||||
console.log('REINFORCEMENT LEARNING TRADING AGENT');
|
||||
console.log('═'.repeat(70));
|
||||
console.log();
|
||||
|
||||
// 1. Generate data
|
||||
console.log('1. Environment Setup:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const priceData = generatePriceData(1000);
|
||||
const env = new TradingEnvironment(rlConfig, priceData);
|
||||
const stateEncoder = new StateEncoder(rlConfig);
|
||||
|
||||
console.log(` Price data: ${priceData.length} candles`);
|
||||
console.log(` Initial capital: $${rlConfig.trading.initialCapital.toLocaleString()}`);
|
||||
console.log(` Action space: ${rlConfig.network.actionSpace} actions`);
|
||||
console.log(` State dimension: ${rlConfig.network.stateDim}`);
|
||||
console.log();
|
||||
|
||||
// 2. Initialize agent
|
||||
console.log('2. Agent Configuration:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const agent = new DQNAgent(rlConfig);
|
||||
|
||||
console.log(` Network: ${rlConfig.network.hiddenLayers.join(' → ')} → ${rlConfig.network.actionSpace}`);
|
||||
console.log(` Learning rate: ${rlConfig.learning.learningRate}`);
|
||||
console.log(` Discount factor: ${rlConfig.learning.gamma}`);
|
||||
console.log(` Replay buffer: ${rlConfig.learning.replayBufferSize}`);
|
||||
console.log(` Batch size: ${rlConfig.learning.batchSize}`);
|
||||
console.log();
|
||||
|
||||
// 3. Training
|
||||
console.log('3. Training Loop:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const numEpisodes = 20;
|
||||
const episodeRewards = [];
|
||||
const episodeValues = [];
|
||||
|
||||
for (let episode = 0; episode < numEpisodes; episode++) {
|
||||
let state = env.reset();
|
||||
let totalReward = 0;
|
||||
let done = false;
|
||||
|
||||
// Update price history for state encoding
|
||||
for (let i = 0; i < 50; i++) {
|
||||
stateEncoder.update(priceData[i].close);
|
||||
}
|
||||
|
||||
while (!done) {
|
||||
const encodedState = stateEncoder.encode(state);
|
||||
const action = agent.selectAction(encodedState);
|
||||
|
||||
const { state: nextState, reward, done: episodeDone, info } = env.step(action);
|
||||
|
||||
stateEncoder.update(priceData[env.currentStep].close);
|
||||
const nextEncodedState = stateEncoder.encode(nextState);
|
||||
|
||||
agent.addExperience(encodedState, action, reward, nextEncodedState, episodeDone);
|
||||
|
||||
// Train
|
||||
if (agent.stepCount % 4 === 0) {
|
||||
agent.train();
|
||||
}
|
||||
|
||||
totalReward += reward;
|
||||
state = nextState;
|
||||
done = episodeDone;
|
||||
}
|
||||
|
||||
agent.decayEpsilon();
|
||||
agent.episodeCount++;
|
||||
|
||||
const finalValue = env.getPortfolioValue();
|
||||
episodeRewards.push(totalReward);
|
||||
episodeValues.push(finalValue);
|
||||
|
||||
if ((episode + 1) % 5 === 0) {
|
||||
const avgReward = episodeRewards.slice(-5).reduce((a, b) => a + b, 0) / 5;
|
||||
console.log(` Episode ${(episode + 1).toString().padStart(3)}: Reward=${avgReward.toFixed(1).padStart(7)}, Value=$${finalValue.toFixed(0).padStart(7)}, ε=${agent.epsilon.toFixed(3)}`);
|
||||
}
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 4. Final evaluation
|
||||
console.log('4. Final Evaluation:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
// Run one episode with no exploration
|
||||
agent.epsilon = 0;
|
||||
let evalState = env.reset();
|
||||
let evalDone = false;
|
||||
const evalActions = [];
|
||||
|
||||
for (let i = 0; i < 50; i++) {
|
||||
stateEncoder.update(priceData[i].close);
|
||||
}
|
||||
|
||||
while (!evalDone) {
|
||||
const encodedState = stateEncoder.encode(evalState);
|
||||
const action = agent.selectAction(encodedState);
|
||||
evalActions.push(ActionNames[action]);
|
||||
|
||||
const { state: nextState, done } = env.step(action);
|
||||
stateEncoder.update(priceData[env.currentStep].close);
|
||||
evalState = nextState;
|
||||
evalDone = done;
|
||||
}
|
||||
|
||||
const finalValue = env.getPortfolioValue();
|
||||
const totalReturn = (finalValue - rlConfig.trading.initialCapital) / rlConfig.trading.initialCapital;
|
||||
|
||||
console.log(` Final Portfolio: $${finalValue.toFixed(2)}`);
|
||||
console.log(` Total Return: ${(totalReturn * 100).toFixed(2)}%`);
|
||||
console.log(` Realized P&L: $${env.realizedPnL.toFixed(2)}`);
|
||||
console.log(` Total Trades: ${env.trades.length}`);
|
||||
console.log(` Win Rate: ${(env.getWinRate() * 100).toFixed(1)}%`);
|
||||
console.log(` Sharpe Ratio: ${env.getSharpe().toFixed(3)}`);
|
||||
console.log(` Max Drawdown: ${(env.getDrawdown() * 100).toFixed(1)}%`);
|
||||
console.log();
|
||||
|
||||
// 5. Action distribution
|
||||
console.log('5. Action Distribution:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const actionCounts = {};
|
||||
for (const action of evalActions) {
|
||||
actionCounts[action] = (actionCounts[action] || 0) + 1;
|
||||
}
|
||||
|
||||
for (const [action, count] of Object.entries(actionCounts).sort((a, b) => b[1] - a[1])) {
|
||||
const pct = (count / evalActions.length * 100).toFixed(1);
|
||||
const bar = '█'.repeat(Math.floor(count / evalActions.length * 40));
|
||||
console.log(` ${action.padEnd(12)} ${bar.padEnd(40)} ${pct}%`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 6. Learning curve
|
||||
console.log('6. Learning Curve:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(' Episode Returns:');
|
||||
let curve = ' ';
|
||||
const minReward = Math.min(...episodeRewards);
|
||||
const maxReward = Math.max(...episodeRewards);
|
||||
const range = maxReward - minReward || 1;
|
||||
|
||||
for (const reward of episodeRewards) {
|
||||
const normalized = (reward - minReward) / range;
|
||||
if (normalized < 0.25) curve += '▁';
|
||||
else if (normalized < 0.5) curve += '▃';
|
||||
else if (normalized < 0.75) curve += '▅';
|
||||
else curve += '█';
|
||||
}
|
||||
console.log(curve);
|
||||
console.log(` Min: ${minReward.toFixed(1)} Max: ${maxReward.toFixed(1)}`);
|
||||
console.log();
|
||||
|
||||
// 7. Q-value analysis
|
||||
console.log('7. Q-Value Analysis (Sample State):');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
const sampleState = stateEncoder.encode(evalState);
|
||||
const qValues = agent.qNetwork.forward(sampleState);
|
||||
|
||||
console.log(' Action Q-Values:');
|
||||
for (let i = 0; i < ActionNames.length; i++) {
|
||||
const bar = qValues[i] > 0 ? '+'.repeat(Math.min(20, Math.floor(qValues[i] * 2))) : '';
|
||||
const negBar = qValues[i] < 0 ? '-'.repeat(Math.min(20, Math.floor(Math.abs(qValues[i]) * 2))) : '';
|
||||
console.log(` ${ActionNames[i].padEnd(12)} ${qValues[i] >= 0 ? '+' : ''}${qValues[i].toFixed(3)} ${bar}${negBar}`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 8. Experience replay stats
|
||||
console.log('8. Experience Replay Statistics:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
console.log(` Buffer size: ${agent.replayBuffer.size()}`);
|
||||
console.log(` Total steps: ${agent.stepCount}`);
|
||||
console.log(` Training updates: ${agent.losses.length}`);
|
||||
if (agent.losses.length > 0) {
|
||||
const avgLoss = agent.losses.reduce((a, b) => a + b, 0) / agent.losses.length;
|
||||
console.log(` Average loss: ${avgLoss.toFixed(4)}`);
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 9. Trading strategy emerged
|
||||
console.log('9. Emergent Strategy Analysis:');
|
||||
console.log('─'.repeat(70));
|
||||
|
||||
// Analyze when agent buys vs sells
|
||||
const buyActions = evalActions.filter(a => a.includes('BUY')).length;
|
||||
const sellActions = evalActions.filter(a => a.includes('SELL')).length;
|
||||
const holdActions = evalActions.filter(a => a === 'HOLD').length;
|
||||
|
||||
console.log(' The agent learned to:');
|
||||
if (holdActions > evalActions.length * 0.5) {
|
||||
console.log(' - Be patient (primarily holding positions)');
|
||||
}
|
||||
if (buyActions > sellActions) {
|
||||
console.log(' - Favor long positions (more buys than sells)');
|
||||
} else if (sellActions > buyActions) {
|
||||
console.log(' - Manage risk actively (frequent profit taking)');
|
||||
}
|
||||
console.log();
|
||||
|
||||
// 10. RuVector integration
|
||||
console.log('10. RuVector Vector Storage:');
|
||||
console.log('─'.repeat(70));
|
||||
console.log(' State vectors can be stored for similarity search:');
|
||||
console.log();
|
||||
console.log(` State vector sample (first 5 dims):`);
|
||||
console.log(` [${sampleState.slice(0, 5).map(v => v.toFixed(4)).join(', ')}]`);
|
||||
console.log();
|
||||
console.log(' Use cases:');
|
||||
console.log(' - Find similar market states from history');
|
||||
console.log(' - Experience replay with prioritized sampling');
|
||||
console.log(' - State clustering for interpretability');
|
||||
console.log();
|
||||
|
||||
console.log('═'.repeat(70));
|
||||
console.log('Reinforcement learning agent training completed');
|
||||
console.log('═'.repeat(70));
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
Reference in New Issue
Block a user