Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
This commit is contained in:
160
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.d.ts
vendored
Normal file
160
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.d.ts
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
/**
|
||||
* Agent Coordinator - Main coordination logic for distributed ruvector agents
|
||||
*
|
||||
* Handles:
|
||||
* - Agent initialization and registration
|
||||
* - Task distribution across regions
|
||||
* - Load balancing logic
|
||||
* - Health monitoring
|
||||
* - Failover coordination
|
||||
*/
|
||||
import { EventEmitter } from 'events';
|
||||
export interface AgentMetrics {
|
||||
agentId: string;
|
||||
region: string;
|
||||
cpuUsage: number;
|
||||
memoryUsage: number;
|
||||
activeStreams: number;
|
||||
queryLatency: number;
|
||||
timestamp: number;
|
||||
healthy: boolean;
|
||||
}
|
||||
export interface Task {
|
||||
id: string;
|
||||
type: 'query' | 'index' | 'sync' | 'maintenance';
|
||||
payload: any;
|
||||
priority: number;
|
||||
region?: string;
|
||||
retries: number;
|
||||
maxRetries: number;
|
||||
createdAt: number;
|
||||
}
|
||||
export interface AgentRegistration {
|
||||
agentId: string;
|
||||
region: string;
|
||||
endpoint: string;
|
||||
capabilities: string[];
|
||||
capacity: number;
|
||||
registeredAt: number;
|
||||
}
|
||||
export interface CoordinatorConfig {
|
||||
maxAgentsPerRegion: number;
|
||||
healthCheckInterval: number;
|
||||
taskTimeout: number;
|
||||
retryBackoffBase: number;
|
||||
retryBackoffMax: number;
|
||||
loadBalancingStrategy: 'round-robin' | 'least-connections' | 'weighted' | 'adaptive';
|
||||
failoverThreshold: number;
|
||||
enableClaudeFlowHooks: boolean;
|
||||
}
|
||||
export declare class AgentCoordinator extends EventEmitter {
|
||||
private config;
|
||||
private agents;
|
||||
private agentMetrics;
|
||||
private taskQueue;
|
||||
private activeTasks;
|
||||
private healthCheckTimer?;
|
||||
private taskDistributionTimer?;
|
||||
private regionLoadIndex;
|
||||
private circuitBreakers;
|
||||
constructor(config: CoordinatorConfig);
|
||||
/**
|
||||
* Initialize coordinator with claude-flow hooks
|
||||
*/
|
||||
private initializeCoordinator;
|
||||
/**
|
||||
* Register a new agent in the coordination system
|
||||
*/
|
||||
registerAgent(registration: AgentRegistration): Promise<void>;
|
||||
/**
|
||||
* Unregister an agent from the coordination system
|
||||
*/
|
||||
unregisterAgent(agentId: string): Promise<void>;
|
||||
/**
|
||||
* Submit a task for distributed execution
|
||||
*/
|
||||
submitTask(task: Omit<Task, 'id' | 'retries' | 'createdAt'>): Promise<string>;
|
||||
/**
|
||||
* Insert task into queue maintaining priority order
|
||||
*/
|
||||
private insertTaskByPriority;
|
||||
/**
|
||||
* Distribute tasks to agents using configured load balancing strategy
|
||||
*/
|
||||
private distributeNextTask;
|
||||
/**
|
||||
* Select best agent for task based on load balancing strategy
|
||||
*/
|
||||
private selectAgent;
|
||||
/**
|
||||
* Round-robin load balancing
|
||||
*/
|
||||
private selectAgentRoundRobin;
|
||||
/**
|
||||
* Least connections load balancing
|
||||
*/
|
||||
private selectAgentLeastConnections;
|
||||
/**
|
||||
* Weighted load balancing based on agent capacity
|
||||
*/
|
||||
private selectAgentWeighted;
|
||||
/**
|
||||
* Adaptive load balancing based on real-time metrics
|
||||
*/
|
||||
private selectAgentAdaptive;
|
||||
/**
|
||||
* Calculate adaptive score for agent selection
|
||||
*/
|
||||
private calculateAdaptiveScore;
|
||||
/**
|
||||
* Execute task with exponential backoff retry logic
|
||||
*/
|
||||
private executeTaskWithRetry;
|
||||
/**
|
||||
* Execute task on specific agent (placeholder for actual implementation)
|
||||
*/
|
||||
private executeTaskOnAgent;
|
||||
/**
|
||||
* Handle task failure
|
||||
*/
|
||||
private handleTaskFailure;
|
||||
/**
|
||||
* Redistribute task to another agent (failover)
|
||||
*/
|
||||
private redistributeTask;
|
||||
/**
|
||||
* Failover task when agent is unavailable
|
||||
*/
|
||||
private failoverTask;
|
||||
/**
|
||||
* Update agent metrics
|
||||
*/
|
||||
updateAgentMetrics(metrics: AgentMetrics): void;
|
||||
/**
|
||||
* Start health monitoring loop
|
||||
*/
|
||||
private startHealthMonitoring;
|
||||
/**
|
||||
* Perform health checks on all agents
|
||||
*/
|
||||
private performHealthChecks;
|
||||
/**
|
||||
* Start task distribution loop
|
||||
*/
|
||||
private startTaskDistribution;
|
||||
/**
|
||||
* Get coordinator status
|
||||
*/
|
||||
getStatus(): {
|
||||
totalAgents: number;
|
||||
healthyAgents: number;
|
||||
queuedTasks: number;
|
||||
activeTasks: number;
|
||||
regionDistribution: Record<string, number>;
|
||||
};
|
||||
/**
|
||||
* Shutdown coordinator gracefully
|
||||
*/
|
||||
shutdown(): Promise<void>;
|
||||
}
|
||||
//# sourceMappingURL=agent-coordinator.d.ts.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.d.ts.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"agent-coordinator.d.ts","sourceRoot":"","sources":["agent-coordinator.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG;AAEH,OAAO,EAAE,YAAY,EAAE,MAAM,QAAQ,CAAC;AAMtC,MAAM,WAAW,YAAY;IAC3B,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,aAAa,EAAE,MAAM,CAAC;IACtB,YAAY,EAAE,MAAM,CAAC;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,OAAO,CAAC;CAClB;AAED,MAAM,WAAW,IAAI;IACnB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,OAAO,GAAG,OAAO,GAAG,MAAM,GAAG,aAAa,CAAC;IACjD,OAAO,EAAE,GAAG,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;IACjB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,UAAU,EAAE,MAAM,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,WAAW,iBAAiB;IAChC,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,MAAM,CAAC;IACjB,YAAY,EAAE,MAAM,EAAE,CAAC;IACvB,QAAQ,EAAE,MAAM,CAAC;IACjB,YAAY,EAAE,MAAM,CAAC;CACtB;AAED,MAAM,WAAW,iBAAiB;IAChC,kBAAkB,EAAE,MAAM,CAAC;IAC3B,mBAAmB,EAAE,MAAM,CAAC;IAC5B,WAAW,EAAE,MAAM,CAAC;IACpB,gBAAgB,EAAE,MAAM,CAAC;IACzB,eAAe,EAAE,MAAM,CAAC;IACxB,qBAAqB,EAAE,aAAa,GAAG,mBAAmB,GAAG,UAAU,GAAG,UAAU,CAAC;IACrF,iBAAiB,EAAE,MAAM,CAAC;IAC1B,qBAAqB,EAAE,OAAO,CAAC;CAChC;AAED,qBAAa,gBAAiB,SAAQ,YAAY;IAUpC,OAAO,CAAC,MAAM;IAT1B,OAAO,CAAC,MAAM,CAA6C;IAC3D,OAAO,CAAC,YAAY,CAAwC;IAC5D,OAAO,CAAC,SAAS,CAAc;IAC/B,OAAO,CAAC,WAAW,CAAgC;IACnD,OAAO,CAAC,gBAAgB,CAAC,CAAiB;IAC1C,OAAO,CAAC,qBAAqB,CAAC,CAAiB;IAC/C,OAAO,CAAC,eAAe,CAAkC;IACzD,OAAO,CAAC,eAAe,CAA0C;gBAE7C,MAAM,EAAE,iBAAiB;IAK7C;;OAEG;YACW,qBAAqB;IAwBnC;;OAEG;IACG,aAAa,CAAC,YAAY,EAAE,iBAAiB,GAAG,OAAO,CAAC,IAAI,CAAC;IAwCnE;;OAEG;IACG,eAAe,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAwBrD;;OAEG;IACG,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC,IAAI,EAAE,IAAI,GAAG,SAAS,GAAG,WAAW,CAAC,GAAG,OAAO,CAAC,MAAM,CAAC;IAkBnF;;OAEG;IACH,OAAO,CAAC,oBAAoB;IAS5B;;OAEG;YACW,kBAAkB;IAyChC;;OAEG;YACW,WAAW;IA0BzB;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAW7B;;OAEG;IACH,OAAO,CAAC,2BAA2B;IAWnC;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAY3B;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAe3B;;OAEG;IACH,OAAO,CAAC,sBAAsB;IAS9B;;OAEG;YACW,oBAAoB;
IA+ClC;;OAEG;YACW,kBAAkB;IAkBhC;;OAEG;YACW,iBAAiB;IAa/B;;OAEG;YACW,gBAAgB;IAU9B;;OAEG;YACW,YAAY;IAS1B;;OAEG;IACH,kBAAkB,CAAC,OAAO,EAAE,YAAY,GAAG,IAAI;IAgB/C;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAM7B;;OAEG;YACW,mBAAmB;IA0BjC;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAQ7B;;OAEG;IACH,SAAS,IAAI;QACX,WAAW,EAAE,MAAM,CAAC;QACpB,aAAa,EAAE,MAAM,CAAC;QACtB,WAAW,EAAE,MAAM,CAAC;QACpB,WAAW,EAAE,MAAM,CAAC;QACpB,kBAAkB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;KAC5C;IAmBD;;OAEG;IACG,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;CAwBhC"}
|
||||
466
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.js
vendored
Normal file
466
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.js
vendored
Normal file
@@ -0,0 +1,466 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Agent Coordinator - Main coordination logic for distributed ruvector agents
|
||||
*
|
||||
* Handles:
|
||||
* - Agent initialization and registration
|
||||
* - Task distribution across regions
|
||||
* - Load balancing logic
|
||||
* - Health monitoring
|
||||
* - Failover coordination
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.AgentCoordinator = void 0;
|
||||
const events_1 = require("events");
|
||||
const child_process_1 = require("child_process");
|
||||
const util_1 = require("util");
|
||||
const execAsync = (0, util_1.promisify)(child_process_1.exec);
|
||||
class AgentCoordinator extends events_1.EventEmitter {
|
||||
constructor(config) {
|
||||
super();
|
||||
this.config = config;
|
||||
this.agents = new Map();
|
||||
this.agentMetrics = new Map();
|
||||
this.taskQueue = [];
|
||||
this.activeTasks = new Map();
|
||||
this.regionLoadIndex = new Map();
|
||||
this.circuitBreakers = new Map();
|
||||
this.initializeCoordinator();
|
||||
}
|
||||
/**
|
||||
* Initialize coordinator with claude-flow hooks
|
||||
*/
|
||||
async initializeCoordinator() {
|
||||
console.log('[AgentCoordinator] Initializing coordinator...');
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Pre-task hook for coordination initialization
|
||||
await execAsync(`npx claude-flow@alpha hooks pre-task --description "Initialize agent coordinator"`);
|
||||
console.log('[AgentCoordinator] Claude-flow pre-task hook executed');
|
||||
}
|
||||
catch (error) {
|
||||
console.warn('[AgentCoordinator] Claude-flow hooks not available:', error);
|
||||
}
|
||||
}
|
||||
// Start health monitoring
|
||||
this.startHealthMonitoring();
|
||||
// Start task distribution
|
||||
this.startTaskDistribution();
|
||||
this.emit('coordinator:initialized');
|
||||
}
|
||||
/**
|
||||
* Register a new agent in the coordination system
|
||||
*/
|
||||
async registerAgent(registration) {
|
||||
console.log(`[AgentCoordinator] Registering agent: ${registration.agentId} in ${registration.region}`);
|
||||
// Check if region has capacity
|
||||
const regionAgents = Array.from(this.agents.values()).filter(a => a.region === registration.region);
|
||||
if (regionAgents.length >= this.config.maxAgentsPerRegion) {
|
||||
throw new Error(`Region ${registration.region} has reached max agent capacity`);
|
||||
}
|
||||
this.agents.set(registration.agentId, registration);
|
||||
// Initialize circuit breaker for agent
|
||||
this.circuitBreakers.set(registration.agentId, new CircuitBreaker({
|
||||
threshold: this.config.failoverThreshold,
|
||||
timeout: this.config.taskTimeout,
|
||||
}));
|
||||
// Initialize metrics
|
||||
this.agentMetrics.set(registration.agentId, {
|
||||
agentId: registration.agentId,
|
||||
region: registration.region,
|
||||
cpuUsage: 0,
|
||||
memoryUsage: 0,
|
||||
activeStreams: 0,
|
||||
queryLatency: 0,
|
||||
timestamp: Date.now(),
|
||||
healthy: true,
|
||||
});
|
||||
this.emit('agent:registered', registration);
|
||||
console.log(`[AgentCoordinator] Agent ${registration.agentId} registered successfully`);
|
||||
}
|
||||
/**
|
||||
* Unregister an agent from the coordination system
|
||||
*/
|
||||
async unregisterAgent(agentId) {
|
||||
console.log(`[AgentCoordinator] Unregistering agent: ${agentId}`);
|
||||
const agent = this.agents.get(agentId);
|
||||
if (!agent) {
|
||||
throw new Error(`Agent ${agentId} not found`);
|
||||
}
|
||||
// Redistribute active tasks
|
||||
const agentTasks = Array.from(this.activeTasks.values()).filter(task => task.region === agent.region);
|
||||
for (const task of agentTasks) {
|
||||
await this.redistributeTask(task);
|
||||
}
|
||||
this.agents.delete(agentId);
|
||||
this.agentMetrics.delete(agentId);
|
||||
this.circuitBreakers.delete(agentId);
|
||||
this.emit('agent:unregistered', { agentId });
|
||||
}
|
||||
/**
|
||||
* Submit a task for distributed execution
|
||||
*/
|
||||
async submitTask(task) {
|
||||
const fullTask = {
|
||||
...task,
|
||||
id: `task-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
|
||||
retries: 0,
|
||||
createdAt: Date.now(),
|
||||
};
|
||||
console.log(`[AgentCoordinator] Submitting task: ${fullTask.id} (type: ${fullTask.type})`);
|
||||
// Add to queue based on priority
|
||||
this.insertTaskByPriority(fullTask);
|
||||
this.emit('task:submitted', fullTask);
|
||||
return fullTask.id;
|
||||
}
|
||||
/**
|
||||
* Insert task into queue maintaining priority order
|
||||
*/
|
||||
insertTaskByPriority(task) {
|
||||
let insertIndex = this.taskQueue.findIndex(t => t.priority < task.priority);
|
||||
if (insertIndex === -1) {
|
||||
this.taskQueue.push(task);
|
||||
}
|
||||
else {
|
||||
this.taskQueue.splice(insertIndex, 0, task);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Distribute tasks to agents using configured load balancing strategy
|
||||
*/
|
||||
async distributeNextTask() {
|
||||
if (this.taskQueue.length === 0)
|
||||
return;
|
||||
const task = this.taskQueue.shift();
|
||||
try {
|
||||
// Select agent based on load balancing strategy
|
||||
const agent = await this.selectAgent(task);
|
||||
if (!agent) {
|
||||
console.warn(`[AgentCoordinator] No available agent for task ${task.id}, requeuing`);
|
||||
this.insertTaskByPriority(task);
|
||||
return;
|
||||
}
|
||||
// Check circuit breaker
|
||||
const circuitBreaker = this.circuitBreakers.get(agent.agentId);
|
||||
if (circuitBreaker && !circuitBreaker.canExecute()) {
|
||||
console.warn(`[AgentCoordinator] Circuit breaker open for agent ${agent.agentId}`);
|
||||
await this.failoverTask(task, agent.agentId);
|
||||
return;
|
||||
}
|
||||
// Assign task to agent
|
||||
this.activeTasks.set(task.id, { ...task, region: agent.region });
|
||||
this.emit('task:assigned', {
|
||||
taskId: task.id,
|
||||
agentId: agent.agentId,
|
||||
region: agent.region,
|
||||
});
|
||||
// Execute task with timeout and retry logic
|
||||
await this.executeTaskWithRetry(task, agent);
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[AgentCoordinator] Error distributing task ${task.id}:`, error);
|
||||
await this.handleTaskFailure(task, error);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Select best agent for task based on load balancing strategy
|
||||
*/
|
||||
async selectAgent(task) {
|
||||
const availableAgents = Array.from(this.agents.values()).filter(agent => {
|
||||
const metrics = this.agentMetrics.get(agent.agentId);
|
||||
return metrics?.healthy && (!task.region || agent.region === task.region);
|
||||
});
|
||||
if (availableAgents.length === 0)
|
||||
return null;
|
||||
switch (this.config.loadBalancingStrategy) {
|
||||
case 'round-robin':
|
||||
return this.selectAgentRoundRobin(availableAgents, task);
|
||||
case 'least-connections':
|
||||
return this.selectAgentLeastConnections(availableAgents);
|
||||
case 'weighted':
|
||||
return this.selectAgentWeighted(availableAgents);
|
||||
case 'adaptive':
|
||||
return this.selectAgentAdaptive(availableAgents);
|
||||
default:
|
||||
return availableAgents[0];
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Round-robin load balancing
|
||||
*/
|
||||
selectAgentRoundRobin(agents, task) {
|
||||
const region = task.region || 'default';
|
||||
const currentIndex = this.regionLoadIndex.get(region) || 0;
|
||||
const regionAgents = agents.filter(a => !task.region || a.region === task.region);
|
||||
const selectedAgent = regionAgents[currentIndex % regionAgents.length];
|
||||
this.regionLoadIndex.set(region, (currentIndex + 1) % regionAgents.length);
|
||||
return selectedAgent;
|
||||
}
|
||||
/**
|
||||
* Least connections load balancing
|
||||
*/
|
||||
selectAgentLeastConnections(agents) {
|
||||
return agents.reduce((best, agent) => {
|
||||
const bestMetrics = this.agentMetrics.get(best.agentId);
|
||||
const agentMetrics = this.agentMetrics.get(agent.agentId);
|
||||
return (agentMetrics?.activeStreams || 0) < (bestMetrics?.activeStreams || 0)
|
||||
? agent
|
||||
: best;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Weighted load balancing based on agent capacity
|
||||
*/
|
||||
selectAgentWeighted(agents) {
|
||||
const totalCapacity = agents.reduce((sum, a) => sum + a.capacity, 0);
|
||||
let random = Math.random() * totalCapacity;
|
||||
for (const agent of agents) {
|
||||
random -= agent.capacity;
|
||||
if (random <= 0)
|
||||
return agent;
|
||||
}
|
||||
return agents[agents.length - 1];
|
||||
}
|
||||
/**
|
||||
* Adaptive load balancing based on real-time metrics
|
||||
*/
|
||||
selectAgentAdaptive(agents) {
|
||||
return agents.reduce((best, agent) => {
|
||||
const bestMetrics = this.agentMetrics.get(best.agentId);
|
||||
const agentMetrics = this.agentMetrics.get(agent.agentId);
|
||||
if (!bestMetrics || !agentMetrics)
|
||||
return best;
|
||||
// Score based on: low CPU, low memory, low streams, low latency
|
||||
const bestScore = this.calculateAdaptiveScore(bestMetrics);
|
||||
const agentScore = this.calculateAdaptiveScore(agentMetrics);
|
||||
return agentScore > bestScore ? agent : best;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Calculate adaptive score for agent selection
|
||||
*/
|
||||
calculateAdaptiveScore(metrics) {
|
||||
return ((100 - metrics.cpuUsage) * 0.3 +
|
||||
(100 - metrics.memoryUsage) * 0.3 +
|
||||
(1000 - metrics.activeStreams) / 10 * 0.2 +
|
||||
(1000 - metrics.queryLatency) / 10 * 0.2);
|
||||
}
|
||||
/**
|
||||
* Execute task with exponential backoff retry logic
|
||||
*/
|
||||
async executeTaskWithRetry(task, agent) {
|
||||
const maxRetries = task.maxRetries || 3;
|
||||
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
||||
try {
|
||||
const timeout = this.config.taskTimeout;
|
||||
// Simulate task execution (replace with actual agent communication)
|
||||
await this.executeTaskOnAgent(task, agent, timeout);
|
||||
// Task successful
|
||||
this.activeTasks.delete(task.id);
|
||||
this.emit('task:completed', { taskId: task.id, agentId: agent.agentId });
|
||||
// Record success in circuit breaker
|
||||
this.circuitBreakers.get(agent.agentId)?.recordSuccess();
|
||||
return;
|
||||
}
|
||||
catch (error) {
|
||||
task.retries = attempt + 1;
|
||||
if (attempt < maxRetries) {
|
||||
// Calculate backoff delay
|
||||
const backoff = Math.min(this.config.retryBackoffBase * Math.pow(2, attempt), this.config.retryBackoffMax);
|
||||
console.warn(`[AgentCoordinator] Task ${task.id} attempt ${attempt + 1} failed, retrying in ${backoff}ms`, error);
|
||||
await new Promise(resolve => setTimeout(resolve, backoff));
|
||||
}
|
||||
else {
|
||||
// Max retries exceeded
|
||||
console.error(`[AgentCoordinator] Task ${task.id} failed after ${maxRetries} attempts`);
|
||||
await this.handleTaskFailure(task, error);
|
||||
// Record failure in circuit breaker
|
||||
this.circuitBreakers.get(agent.agentId)?.recordFailure();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Execute task on specific agent (placeholder for actual implementation)
|
||||
*/
|
||||
async executeTaskOnAgent(task, agent, timeout) {
|
||||
// This would be replaced with actual HTTP/gRPC call to agent endpoint
|
||||
// For now, simulate execution
|
||||
return new Promise((resolve, reject) => {
|
||||
const timer = setTimeout(() => reject(new Error('Task timeout')), timeout);
|
||||
// Simulate task execution
|
||||
setTimeout(() => {
|
||||
clearTimeout(timer);
|
||||
resolve();
|
||||
}, Math.random() * 100);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Handle task failure
|
||||
*/
|
||||
async handleTaskFailure(task, error) {
|
||||
this.activeTasks.delete(task.id);
|
||||
this.emit('task:failed', {
|
||||
taskId: task.id,
|
||||
error: error.message,
|
||||
retries: task.retries,
|
||||
});
|
||||
// Could implement dead letter queue here
|
||||
console.error(`[AgentCoordinator] Task ${task.id} failed permanently:`, error);
|
||||
}
|
||||
/**
|
||||
* Redistribute task to another agent (failover)
|
||||
*/
|
||||
async redistributeTask(task) {
|
||||
console.log(`[AgentCoordinator] Redistributing task ${task.id}`);
|
||||
// Remove region preference to allow any region
|
||||
const redistributedTask = { ...task, region: undefined };
|
||||
this.insertTaskByPriority(redistributedTask);
|
||||
this.emit('task:redistributed', { taskId: task.id });
|
||||
}
|
||||
/**
|
||||
* Failover task when agent is unavailable
|
||||
*/
|
||||
async failoverTask(task, failedAgentId) {
|
||||
console.log(`[AgentCoordinator] Failing over task ${task.id} from agent ${failedAgentId}`);
|
||||
this.activeTasks.delete(task.id);
|
||||
await this.redistributeTask(task);
|
||||
this.emit('task:failover', { taskId: task.id, failedAgentId });
|
||||
}
|
||||
/**
|
||||
* Update agent metrics
|
||||
*/
|
||||
updateAgentMetrics(metrics) {
|
||||
this.agentMetrics.set(metrics.agentId, {
|
||||
...metrics,
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
// Check if agent health changed
|
||||
const previousMetrics = this.agentMetrics.get(metrics.agentId);
|
||||
if (previousMetrics && previousMetrics.healthy !== metrics.healthy) {
|
||||
this.emit('agent:health-changed', {
|
||||
agentId: metrics.agentId,
|
||||
healthy: metrics.healthy,
|
||||
});
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Start health monitoring loop
|
||||
*/
|
||||
startHealthMonitoring() {
|
||||
this.healthCheckTimer = setInterval(() => {
|
||||
this.performHealthChecks();
|
||||
}, this.config.healthCheckInterval);
|
||||
}
|
||||
/**
|
||||
* Perform health checks on all agents
|
||||
*/
|
||||
async performHealthChecks() {
|
||||
const now = Date.now();
|
||||
for (const [agentId, metrics] of this.agentMetrics.entries()) {
|
||||
// Check if metrics are stale (no update in 2x health check interval)
|
||||
const staleThreshold = this.config.healthCheckInterval * 2;
|
||||
const isStale = now - metrics.timestamp > staleThreshold;
|
||||
if (isStale && metrics.healthy) {
|
||||
console.warn(`[AgentCoordinator] Agent ${agentId} marked unhealthy (stale metrics)`);
|
||||
this.agentMetrics.set(agentId, {
|
||||
...metrics,
|
||||
healthy: false,
|
||||
timestamp: now,
|
||||
});
|
||||
this.emit('agent:health-changed', {
|
||||
agentId,
|
||||
healthy: false,
|
||||
reason: 'stale_metrics',
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Start task distribution loop
|
||||
*/
|
||||
startTaskDistribution() {
|
||||
this.taskDistributionTimer = setInterval(() => {
|
||||
this.distributeNextTask().catch(error => {
|
||||
console.error('[AgentCoordinator] Error in task distribution:', error);
|
||||
});
|
||||
}, 100); // Distribute tasks every 100ms
|
||||
}
|
||||
/**
|
||||
* Get coordinator status
|
||||
*/
|
||||
getStatus() {
|
||||
const healthyAgents = Array.from(this.agentMetrics.values()).filter(m => m.healthy).length;
|
||||
const regionDistribution = {};
|
||||
for (const agent of this.agents.values()) {
|
||||
regionDistribution[agent.region] = (regionDistribution[agent.region] || 0) + 1;
|
||||
}
|
||||
return {
|
||||
totalAgents: this.agents.size,
|
||||
healthyAgents,
|
||||
queuedTasks: this.taskQueue.length,
|
||||
activeTasks: this.activeTasks.size,
|
||||
regionDistribution,
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Shutdown coordinator gracefully
|
||||
*/
|
||||
async shutdown() {
|
||||
console.log('[AgentCoordinator] Shutting down coordinator...');
|
||||
if (this.healthCheckTimer) {
|
||||
clearInterval(this.healthCheckTimer);
|
||||
}
|
||||
if (this.taskDistributionTimer) {
|
||||
clearInterval(this.taskDistributionTimer);
|
||||
}
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Post-task hook
|
||||
await execAsync(`npx claude-flow@alpha hooks post-task --task-id "coordinator-shutdown"`);
|
||||
}
|
||||
catch (error) {
|
||||
console.warn('[AgentCoordinator] Error executing post-task hook:', error);
|
||||
}
|
||||
}
|
||||
this.emit('coordinator:shutdown');
|
||||
}
|
||||
}
|
||||
exports.AgentCoordinator = AgentCoordinator;
|
||||
/**
 * Circuit Breaker for agent fault tolerance.
 *
 * Classic three-state breaker:
 *  - 'closed'    : requests flow normally.
 *  - 'open'      : requests are rejected until `config.timeout` ms have
 *                  elapsed since the last recorded failure.
 *  - 'half-open' : a probe request is admitted; a recorded success
 *                  closes the breaker, a recorded failure re-opens it.
 *
 * The breaker trips open once `config.threshold` failures have
 * accumulated without an intervening success.
 */
class CircuitBreaker {
    failures = 0;
    lastFailureTime = 0;
    state = 'closed';
    config;

    /**
     * @param {{ threshold: number, timeout: number }} config
     */
    constructor(config) {
        this.config = config;
    }

    /** Whether a request may be attempted right now. */
    canExecute() {
        switch (this.state) {
            case 'open': {
                const coolDownOver = Date.now() - this.lastFailureTime > this.config.timeout;
                if (!coolDownOver) {
                    return false;
                }
                // Cool-down elapsed: move to half-open and admit a probe.
                this.state = 'half-open';
                return true;
            }
            default:
                // 'closed' and 'half-open' both admit the request.
                return true;
        }
    }

    /** Reset the breaker after a successful call. */
    recordSuccess() {
        this.state = 'closed';
        this.failures = 0;
    }

    /** Count a failure; trip the breaker once the threshold is reached. */
    recordFailure() {
        this.lastFailureTime = Date.now();
        this.failures += 1;
        if (this.failures >= this.config.threshold) {
            this.state = 'open';
        }
    }
}
|
||||
//# sourceMappingURL=agent-coordinator.js.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.js.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
632
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.ts
vendored
Normal file
632
vendor/ruvector/npm/packages/agentic-integration/agent-coordinator.ts
vendored
Normal file
@@ -0,0 +1,632 @@
|
||||
/**
|
||||
* Agent Coordinator - Main coordination logic for distributed ruvector agents
|
||||
*
|
||||
* Handles:
|
||||
* - Agent initialization and registration
|
||||
* - Task distribution across regions
|
||||
* - Load balancing logic
|
||||
* - Health monitoring
|
||||
* - Failover coordination
|
||||
*/
|
||||
|
||||
import { EventEmitter } from 'events';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
export interface AgentMetrics {
|
||||
agentId: string;
|
||||
region: string;
|
||||
cpuUsage: number;
|
||||
memoryUsage: number;
|
||||
activeStreams: number;
|
||||
queryLatency: number;
|
||||
timestamp: number;
|
||||
healthy: boolean;
|
||||
}
|
||||
|
||||
export interface Task {
|
||||
id: string;
|
||||
type: 'query' | 'index' | 'sync' | 'maintenance';
|
||||
payload: any;
|
||||
priority: number;
|
||||
region?: string;
|
||||
retries: number;
|
||||
maxRetries: number;
|
||||
createdAt: number;
|
||||
}
|
||||
|
||||
export interface AgentRegistration {
|
||||
agentId: string;
|
||||
region: string;
|
||||
endpoint: string;
|
||||
capabilities: string[];
|
||||
capacity: number;
|
||||
registeredAt: number;
|
||||
}
|
||||
|
||||
export interface CoordinatorConfig {
|
||||
maxAgentsPerRegion: number;
|
||||
healthCheckInterval: number;
|
||||
taskTimeout: number;
|
||||
retryBackoffBase: number;
|
||||
retryBackoffMax: number;
|
||||
loadBalancingStrategy: 'round-robin' | 'least-connections' | 'weighted' | 'adaptive';
|
||||
failoverThreshold: number;
|
||||
enableClaudeFlowHooks: boolean;
|
||||
}
|
||||
|
||||
export class AgentCoordinator extends EventEmitter {
|
||||
private agents: Map<string, AgentRegistration> = new Map();
|
||||
private agentMetrics: Map<string, AgentMetrics> = new Map();
|
||||
private taskQueue: Task[] = [];
|
||||
private activeTasks: Map<string, Task> = new Map();
|
||||
private healthCheckTimer?: NodeJS.Timeout;
|
||||
private taskDistributionTimer?: NodeJS.Timeout;
|
||||
private regionLoadIndex: Map<string, number> = new Map();
|
||||
private circuitBreakers: Map<string, CircuitBreaker> = new Map();
|
||||
|
||||
constructor(private config: CoordinatorConfig) {
|
||||
super();
|
||||
this.initializeCoordinator();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize coordinator with claude-flow hooks
|
||||
*/
|
||||
private async initializeCoordinator(): Promise<void> {
|
||||
console.log('[AgentCoordinator] Initializing coordinator...');
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Pre-task hook for coordination initialization
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks pre-task --description "Initialize agent coordinator"`
|
||||
);
|
||||
console.log('[AgentCoordinator] Claude-flow pre-task hook executed');
|
||||
} catch (error) {
|
||||
console.warn('[AgentCoordinator] Claude-flow hooks not available:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// Start health monitoring
|
||||
this.startHealthMonitoring();
|
||||
|
||||
// Start task distribution
|
||||
this.startTaskDistribution();
|
||||
|
||||
this.emit('coordinator:initialized');
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a new agent in the coordination system
|
||||
*/
|
||||
async registerAgent(registration: AgentRegistration): Promise<void> {
|
||||
console.log(`[AgentCoordinator] Registering agent: ${registration.agentId} in ${registration.region}`);
|
||||
|
||||
// Check if region has capacity
|
||||
const regionAgents = Array.from(this.agents.values()).filter(
|
||||
a => a.region === registration.region
|
||||
);
|
||||
|
||||
if (regionAgents.length >= this.config.maxAgentsPerRegion) {
|
||||
throw new Error(`Region ${registration.region} has reached max agent capacity`);
|
||||
}
|
||||
|
||||
this.agents.set(registration.agentId, registration);
|
||||
|
||||
// Initialize circuit breaker for agent
|
||||
this.circuitBreakers.set(
|
||||
registration.agentId,
|
||||
new CircuitBreaker({
|
||||
threshold: this.config.failoverThreshold,
|
||||
timeout: this.config.taskTimeout,
|
||||
})
|
||||
);
|
||||
|
||||
// Initialize metrics
|
||||
this.agentMetrics.set(registration.agentId, {
|
||||
agentId: registration.agentId,
|
||||
region: registration.region,
|
||||
cpuUsage: 0,
|
||||
memoryUsage: 0,
|
||||
activeStreams: 0,
|
||||
queryLatency: 0,
|
||||
timestamp: Date.now(),
|
||||
healthy: true,
|
||||
});
|
||||
|
||||
this.emit('agent:registered', registration);
|
||||
|
||||
console.log(`[AgentCoordinator] Agent ${registration.agentId} registered successfully`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Unregister an agent from the coordination system
|
||||
*/
|
||||
async unregisterAgent(agentId: string): Promise<void> {
|
||||
console.log(`[AgentCoordinator] Unregistering agent: ${agentId}`);
|
||||
|
||||
const agent = this.agents.get(agentId);
|
||||
if (!agent) {
|
||||
throw new Error(`Agent ${agentId} not found`);
|
||||
}
|
||||
|
||||
// Redistribute active tasks
|
||||
const agentTasks = Array.from(this.activeTasks.values()).filter(
|
||||
task => task.region === agent.region
|
||||
);
|
||||
|
||||
for (const task of agentTasks) {
|
||||
await this.redistributeTask(task);
|
||||
}
|
||||
|
||||
this.agents.delete(agentId);
|
||||
this.agentMetrics.delete(agentId);
|
||||
this.circuitBreakers.delete(agentId);
|
||||
|
||||
this.emit('agent:unregistered', { agentId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Submit a task for distributed execution
|
||||
*/
|
||||
async submitTask(task: Omit<Task, 'id' | 'retries' | 'createdAt'>): Promise<string> {
|
||||
const fullTask: Task = {
|
||||
...task,
|
||||
id: `task-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
|
||||
retries: 0,
|
||||
createdAt: Date.now(),
|
||||
};
|
||||
|
||||
console.log(`[AgentCoordinator] Submitting task: ${fullTask.id} (type: ${fullTask.type})`);
|
||||
|
||||
// Add to queue based on priority
|
||||
this.insertTaskByPriority(fullTask);
|
||||
|
||||
this.emit('task:submitted', fullTask);
|
||||
|
||||
return fullTask.id;
|
||||
}
|
||||
|
||||
/**
|
||||
* Insert task into queue maintaining priority order
|
||||
*/
|
||||
private insertTaskByPriority(task: Task): void {
|
||||
let insertIndex = this.taskQueue.findIndex(t => t.priority < task.priority);
|
||||
if (insertIndex === -1) {
|
||||
this.taskQueue.push(task);
|
||||
} else {
|
||||
this.taskQueue.splice(insertIndex, 0, task);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Distribute tasks to agents using configured load balancing strategy
 *
 * Pops the highest-priority queued task, chooses an agent via selectAgent,
 * and executes it with retry handling. The task is requeued when no agent
 * is available, and failed over when the chosen agent's circuit breaker is
 * open. Driven on a fixed tick by startTaskDistribution.
 */
private async distributeNextTask(): Promise<void> {
  if (this.taskQueue.length === 0) return;

  // Queue is priority-ordered (see insertTaskByPriority); take the head.
  const task = this.taskQueue.shift()!;

  try {
    // Select agent based on load balancing strategy
    const agent = await this.selectAgent(task);

    if (!agent) {
      // No healthy agent (in the requested region): put the task back and
      // retry on a later distribution tick.
      console.warn(`[AgentCoordinator] No available agent for task ${task.id}, requeuing`);
      this.insertTaskByPriority(task);
      return;
    }

    // Check circuit breaker
    const circuitBreaker = this.circuitBreakers.get(agent.agentId);
    if (circuitBreaker && !circuitBreaker.canExecute()) {
      // Breaker open for this agent: hand the task to failover instead.
      console.warn(`[AgentCoordinator] Circuit breaker open for agent ${agent.agentId}`);
      await this.failoverTask(task, agent.agentId);
      return;
    }

    // Assign task to agent: record it as active, pinned to the agent's region.
    this.activeTasks.set(task.id, { ...task, region: agent.region });

    this.emit('task:assigned', {
      taskId: task.id,
      agentId: agent.agentId,
      region: agent.region,
    });

    // Execute task with timeout and retry logic
    await this.executeTaskWithRetry(task, agent);

  } catch (error) {
    console.error(`[AgentCoordinator] Error distributing task ${task.id}:`, error);
    await this.handleTaskFailure(task, error);
  }
}
|
||||
|
||||
/**
|
||||
* Select best agent for task based on load balancing strategy
|
||||
*/
|
||||
private async selectAgent(task: Task): Promise<AgentRegistration | null> {
|
||||
const availableAgents = Array.from(this.agents.values()).filter(agent => {
|
||||
const metrics = this.agentMetrics.get(agent.agentId);
|
||||
return metrics?.healthy && (!task.region || agent.region === task.region);
|
||||
});
|
||||
|
||||
if (availableAgents.length === 0) return null;
|
||||
|
||||
switch (this.config.loadBalancingStrategy) {
|
||||
case 'round-robin':
|
||||
return this.selectAgentRoundRobin(availableAgents, task);
|
||||
|
||||
case 'least-connections':
|
||||
return this.selectAgentLeastConnections(availableAgents);
|
||||
|
||||
case 'weighted':
|
||||
return this.selectAgentWeighted(availableAgents);
|
||||
|
||||
case 'adaptive':
|
||||
return this.selectAgentAdaptive(availableAgents);
|
||||
|
||||
default:
|
||||
return availableAgents[0];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Round-robin load balancing
|
||||
*/
|
||||
private selectAgentRoundRobin(agents: AgentRegistration[], task: Task): AgentRegistration {
|
||||
const region = task.region || 'default';
|
||||
const currentIndex = this.regionLoadIndex.get(region) || 0;
|
||||
const regionAgents = agents.filter(a => !task.region || a.region === task.region);
|
||||
|
||||
const selectedAgent = regionAgents[currentIndex % regionAgents.length];
|
||||
this.regionLoadIndex.set(region, (currentIndex + 1) % regionAgents.length);
|
||||
|
||||
return selectedAgent;
|
||||
}
|
||||
|
||||
/**
|
||||
* Least connections load balancing
|
||||
*/
|
||||
private selectAgentLeastConnections(agents: AgentRegistration[]): AgentRegistration {
|
||||
return agents.reduce((best, agent) => {
|
||||
const bestMetrics = this.agentMetrics.get(best.agentId);
|
||||
const agentMetrics = this.agentMetrics.get(agent.agentId);
|
||||
|
||||
return (agentMetrics?.activeStreams || 0) < (bestMetrics?.activeStreams || 0)
|
||||
? agent
|
||||
: best;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Weighted load balancing based on agent capacity
|
||||
*/
|
||||
private selectAgentWeighted(agents: AgentRegistration[]): AgentRegistration {
|
||||
const totalCapacity = agents.reduce((sum, a) => sum + a.capacity, 0);
|
||||
let random = Math.random() * totalCapacity;
|
||||
|
||||
for (const agent of agents) {
|
||||
random -= agent.capacity;
|
||||
if (random <= 0) return agent;
|
||||
}
|
||||
|
||||
return agents[agents.length - 1];
|
||||
}
|
||||
|
||||
/**
|
||||
* Adaptive load balancing based on real-time metrics
|
||||
*/
|
||||
private selectAgentAdaptive(agents: AgentRegistration[]): AgentRegistration {
|
||||
return agents.reduce((best, agent) => {
|
||||
const bestMetrics = this.agentMetrics.get(best.agentId);
|
||||
const agentMetrics = this.agentMetrics.get(agent.agentId);
|
||||
|
||||
if (!bestMetrics || !agentMetrics) return best;
|
||||
|
||||
// Score based on: low CPU, low memory, low streams, low latency
|
||||
const bestScore = this.calculateAdaptiveScore(bestMetrics);
|
||||
const agentScore = this.calculateAdaptiveScore(agentMetrics);
|
||||
|
||||
return agentScore > bestScore ? agent : best;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate adaptive score for agent selection
|
||||
*/
|
||||
private calculateAdaptiveScore(metrics: AgentMetrics): number {
|
||||
return (
|
||||
(100 - metrics.cpuUsage) * 0.3 +
|
||||
(100 - metrics.memoryUsage) * 0.3 +
|
||||
(1000 - metrics.activeStreams) / 10 * 0.2 +
|
||||
(1000 - metrics.queryLatency) / 10 * 0.2
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute task with exponential backoff retry logic
|
||||
*/
|
||||
private async executeTaskWithRetry(task: Task, agent: AgentRegistration): Promise<void> {
|
||||
const maxRetries = task.maxRetries || 3;
|
||||
|
||||
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
||||
try {
|
||||
const timeout = this.config.taskTimeout;
|
||||
|
||||
// Simulate task execution (replace with actual agent communication)
|
||||
await this.executeTaskOnAgent(task, agent, timeout);
|
||||
|
||||
// Task successful
|
||||
this.activeTasks.delete(task.id);
|
||||
this.emit('task:completed', { taskId: task.id, agentId: agent.agentId });
|
||||
|
||||
// Record success in circuit breaker
|
||||
this.circuitBreakers.get(agent.agentId)?.recordSuccess();
|
||||
|
||||
return;
|
||||
|
||||
} catch (error) {
|
||||
task.retries = attempt + 1;
|
||||
|
||||
if (attempt < maxRetries) {
|
||||
// Calculate backoff delay
|
||||
const backoff = Math.min(
|
||||
this.config.retryBackoffBase * Math.pow(2, attempt),
|
||||
this.config.retryBackoffMax
|
||||
);
|
||||
|
||||
console.warn(
|
||||
`[AgentCoordinator] Task ${task.id} attempt ${attempt + 1} failed, retrying in ${backoff}ms`,
|
||||
error
|
||||
);
|
||||
|
||||
await new Promise(resolve => setTimeout(resolve, backoff));
|
||||
} else {
|
||||
// Max retries exceeded
|
||||
console.error(`[AgentCoordinator] Task ${task.id} failed after ${maxRetries} attempts`);
|
||||
await this.handleTaskFailure(task, error);
|
||||
|
||||
// Record failure in circuit breaker
|
||||
this.circuitBreakers.get(agent.agentId)?.recordFailure();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute task on specific agent (placeholder for actual implementation)
|
||||
*/
|
||||
private async executeTaskOnAgent(
|
||||
task: Task,
|
||||
agent: AgentRegistration,
|
||||
timeout: number
|
||||
): Promise<void> {
|
||||
// This would be replaced with actual HTTP/gRPC call to agent endpoint
|
||||
// For now, simulate execution
|
||||
return new Promise((resolve, reject) => {
|
||||
const timer = setTimeout(() => reject(new Error('Task timeout')), timeout);
|
||||
|
||||
// Simulate task execution
|
||||
setTimeout(() => {
|
||||
clearTimeout(timer);
|
||||
resolve();
|
||||
}, Math.random() * 100);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle task failure
|
||||
*/
|
||||
private async handleTaskFailure(task: Task, error: any): Promise<void> {
|
||||
this.activeTasks.delete(task.id);
|
||||
|
||||
this.emit('task:failed', {
|
||||
taskId: task.id,
|
||||
error: error.message,
|
||||
retries: task.retries,
|
||||
});
|
||||
|
||||
// Could implement dead letter queue here
|
||||
console.error(`[AgentCoordinator] Task ${task.id} failed permanently:`, error);
|
||||
}
|
||||
|
||||
/**
|
||||
* Redistribute task to another agent (failover)
|
||||
*/
|
||||
private async redistributeTask(task: Task): Promise<void> {
|
||||
console.log(`[AgentCoordinator] Redistributing task ${task.id}`);
|
||||
|
||||
// Remove region preference to allow any region
|
||||
const redistributedTask = { ...task, region: undefined };
|
||||
this.insertTaskByPriority(redistributedTask);
|
||||
|
||||
this.emit('task:redistributed', { taskId: task.id });
|
||||
}
|
||||
|
||||
/**
|
||||
* Failover task when agent is unavailable
|
||||
*/
|
||||
private async failoverTask(task: Task, failedAgentId: string): Promise<void> {
|
||||
console.log(`[AgentCoordinator] Failing over task ${task.id} from agent ${failedAgentId}`);
|
||||
|
||||
this.activeTasks.delete(task.id);
|
||||
await this.redistributeTask(task);
|
||||
|
||||
this.emit('task:failover', { taskId: task.id, failedAgentId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Update agent metrics
|
||||
*/
|
||||
updateAgentMetrics(metrics: AgentMetrics): void {
|
||||
this.agentMetrics.set(metrics.agentId, {
|
||||
...metrics,
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
// Check if agent health changed
|
||||
const previousMetrics = this.agentMetrics.get(metrics.agentId);
|
||||
if (previousMetrics && previousMetrics.healthy !== metrics.healthy) {
|
||||
this.emit('agent:health-changed', {
|
||||
agentId: metrics.agentId,
|
||||
healthy: metrics.healthy,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start health monitoring loop
|
||||
*/
|
||||
private startHealthMonitoring(): void {
|
||||
this.healthCheckTimer = setInterval(() => {
|
||||
this.performHealthChecks();
|
||||
}, this.config.healthCheckInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform health checks on all agents
|
||||
*/
|
||||
private async performHealthChecks(): Promise<void> {
|
||||
const now = Date.now();
|
||||
|
||||
for (const [agentId, metrics] of this.agentMetrics.entries()) {
|
||||
// Check if metrics are stale (no update in 2x health check interval)
|
||||
const staleThreshold = this.config.healthCheckInterval * 2;
|
||||
const isStale = now - metrics.timestamp > staleThreshold;
|
||||
|
||||
if (isStale && metrics.healthy) {
|
||||
console.warn(`[AgentCoordinator] Agent ${agentId} marked unhealthy (stale metrics)`);
|
||||
|
||||
this.agentMetrics.set(agentId, {
|
||||
...metrics,
|
||||
healthy: false,
|
||||
timestamp: now,
|
||||
});
|
||||
|
||||
this.emit('agent:health-changed', {
|
||||
agentId,
|
||||
healthy: false,
|
||||
reason: 'stale_metrics',
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start task distribution loop
|
||||
*/
|
||||
private startTaskDistribution(): void {
|
||||
this.taskDistributionTimer = setInterval(() => {
|
||||
this.distributeNextTask().catch(error => {
|
||||
console.error('[AgentCoordinator] Error in task distribution:', error);
|
||||
});
|
||||
}, 100); // Distribute tasks every 100ms
|
||||
}
|
||||
|
||||
/**
 * Snapshot of the coordinator's current state: agent counts, queue depth,
 * active-task count, and how agents are spread across regions.
 */
getStatus(): {
  totalAgents: number;
  healthyAgents: number;
  queuedTasks: number;
  activeTasks: number;
  regionDistribution: Record<string, number>;
} {
  let healthyAgents = 0;
  for (const snapshot of this.agentMetrics.values()) {
    if (snapshot.healthy) {
      healthyAgents += 1;
    }
  }

  const regionDistribution: Record<string, number> = {};
  for (const { region } of this.agents.values()) {
    regionDistribution[region] = (regionDistribution[region] || 0) + 1;
  }

  return {
    totalAgents: this.agents.size,
    healthyAgents,
    queuedTasks: this.taskQueue.length,
    activeTasks: this.activeTasks.size,
    regionDistribution,
  };
}
|
||||
|
||||
/**
|
||||
* Shutdown coordinator gracefully
|
||||
*/
|
||||
async shutdown(): Promise<void> {
|
||||
console.log('[AgentCoordinator] Shutting down coordinator...');
|
||||
|
||||
if (this.healthCheckTimer) {
|
||||
clearInterval(this.healthCheckTimer);
|
||||
}
|
||||
|
||||
if (this.taskDistributionTimer) {
|
||||
clearInterval(this.taskDistributionTimer);
|
||||
}
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Post-task hook
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks post-task --task-id "coordinator-shutdown"`
|
||||
);
|
||||
} catch (error) {
|
||||
console.warn('[AgentCoordinator] Error executing post-task hook:', error);
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('coordinator:shutdown');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Circuit Breaker for agent fault tolerance
|
||||
*/
|
||||
class CircuitBreaker {
|
||||
private failures = 0;
|
||||
private lastFailureTime = 0;
|
||||
private state: 'closed' | 'open' | 'half-open' = 'closed';
|
||||
|
||||
constructor(
|
||||
private config: {
|
||||
threshold: number;
|
||||
timeout: number;
|
||||
}
|
||||
) {}
|
||||
|
||||
canExecute(): boolean {
|
||||
if (this.state === 'closed') return true;
|
||||
|
||||
if (this.state === 'open') {
|
||||
// Check if timeout has passed
|
||||
if (Date.now() - this.lastFailureTime > this.config.timeout) {
|
||||
this.state = 'half-open';
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// half-open: allow one request
|
||||
return true;
|
||||
}
|
||||
|
||||
recordSuccess(): void {
|
||||
this.failures = 0;
|
||||
this.state = 'closed';
|
||||
}
|
||||
|
||||
recordFailure(): void {
|
||||
this.failures++;
|
||||
this.lastFailureTime = Date.now();
|
||||
|
||||
if (this.failures >= this.config.threshold) {
|
||||
this.state = 'open';
|
||||
}
|
||||
}
|
||||
}
|
||||
185
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.d.ts
vendored
Normal file
185
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.d.ts
vendored
Normal file
@@ -0,0 +1,185 @@
|
||||
/**
|
||||
* Coordination Protocol - Inter-agent communication and consensus
|
||||
*
|
||||
* Handles:
|
||||
* - Inter-agent messaging
|
||||
* - Consensus for critical operations
|
||||
* - Event-driven coordination
|
||||
* - Pub/Sub integration
|
||||
*/
|
||||
import { EventEmitter } from 'events';
|
||||
export interface Message {
    /** Unique message id, generated by the sending node. */
    id: string;
    /** Message kind; drives receiver-side dispatch. */
    type: 'request' | 'response' | 'broadcast' | 'consensus';
    /** Sender node id. */
    from: string;
    /** Recipient node id(s); absent for broadcasts. */
    to?: string | string[];
    /** Pub/sub topic name when the message targets a topic. */
    topic?: string;
    /** Application-defined payload (untyped). */
    payload: any;
    /** Creation time in epoch milliseconds. */
    timestamp: number;
    /** Time-to-live in ms, measured from `timestamp`; expired messages are dropped on receipt. */
    ttl: number;
    /** Message priority -- presumably higher values are more urgent; confirm against queue processing order. */
    priority: number;
}
|
||||
export interface ConsensusProposal {
    /** Unique proposal id. */
    id: string;
    /** Node id of the proposing node (the proposer auto-votes approve). */
    proposer: string;
    /** Category of operation guarded by consensus. */
    type: 'schema_change' | 'topology_change' | 'critical_operation';
    /** Operation-specific data (untyped). */
    data: any;
    /** Approvals required for acceptance; defaults to a simple majority of known nodes. */
    requiredVotes: number;
    /** Expiry time in epoch ms; a still-pending proposal becomes 'expired' past this point. */
    deadline: number;
    /** Recorded votes keyed by node id (true = approve). */
    votes: Map<string, boolean>;
    /** Lifecycle state of the proposal. */
    status: 'pending' | 'accepted' | 'rejected' | 'expired';
}
|
||||
export interface PubSubTopic {
    /** Topic name. */
    name: string;
    /** Node ids currently subscribed to this topic. */
    subscribers: Set<string>;
    /** Messages delivered on this topic -- presumably trimmed to maxHistorySize; confirm in deliverToTopic. */
    messageHistory: Message[];
    /** Maximum number of messages retained in messageHistory. */
    maxHistorySize: number;
}
|
||||
export interface CoordinationProtocolConfig {
    /** Unique identifier for this node. */
    nodeId: string;
    /** Interval in ms between heartbeats. */
    heartbeatInterval: number;
    /** Default message TTL and response-wait timeout, in ms. */
    messageTimeout: number;
    /** How long (ms) a consensus proposal may stay pending before expiring. */
    consensusTimeout: number;
    /** Upper bound on the message queue -- TODO confirm where this is enforced (enqueueMessage not visible here). */
    maxMessageQueueSize: number;
    /** When true, shells out to claude-flow lifecycle hooks during init/shutdown. */
    enableClaudeFlowHooks: boolean;
    /** Topics created automatically during initialization. */
    pubSubTopics: string[];
}
|
||||
export declare class CoordinationProtocol extends EventEmitter {
    /** Protocol configuration (constructor parameter). */
    private config;
    /** Outgoing messages awaiting processing. */
    private messageQueue;
    /** Sent messages indexed by message id. */
    private sentMessages;
    /** Resolve/reject handlers for requests sent with expectResponse, keyed by request id. */
    private pendingResponses;
    /** Consensus proposals (own and received), keyed by proposal id. */
    private consensusProposals;
    /** Pub/sub topics keyed by topic name. */
    private pubSubTopics;
    /** Ids of peer nodes discovered so far. */
    private knownNodes;
    /** Last contact time (epoch ms) per peer node. */
    private lastHeartbeat;
    private heartbeatTimer?;
    private messageProcessingTimer?;
    /** Monotonic counter used to build unique message ids. */
    private messageCounter;
    constructor(config: CoordinationProtocolConfig);
    /**
     * Initialize coordination protocol
     */
    private initialize;
    /**
     * Send message to another node
     */
    sendMessage(to: string, type: Message['type'], payload: any, options?: {
        topic?: string;
        ttl?: number;
        priority?: number;
        expectResponse?: boolean;
    }): Promise<any>;
    /**
     * Broadcast message to all nodes
     */
    broadcastMessage(type: Message['type'], payload: any, options?: {
        topic?: string;
        ttl?: number;
        priority?: number;
    }): Promise<void>;
    /**
     * Receive and handle message
     */
    receiveMessage(message: Message): Promise<void>;
    /**
     * Handle request message
     */
    private handleRequest;
    /**
     * Send response to a request
     */
    sendResponse(requestId: string, to: string, payload: any): Promise<void>;
    /**
     * Handle response message
     */
    private handleResponse;
    /**
     * Handle broadcast message
     */
    private handleBroadcast;
    /**
     * Propose consensus for critical operation
     */
    proposeConsensus(type: ConsensusProposal['type'], data: any, requiredVotes?: number): Promise<boolean>;
    /**
     * Handle consensus message
     */
    private handleConsensusMessage;
    /**
     * Handle consensus proposal
     */
    private handleConsensusProposal;
    /**
     * Handle consensus vote
     */
    private handleConsensusVote;
    /**
     * Create pub/sub topic
     */
    createTopic(name: string, maxHistorySize?: number): void;
    /**
     * Subscribe to pub/sub topic
     */
    subscribe(topicName: string, subscriberId: string): void;
    /**
     * Unsubscribe from pub/sub topic
     */
    unsubscribe(topicName: string, subscriberId: string): void;
    /**
     * Publish message to topic
     */
    publishToTopic(topicName: string, payload: any): Promise<void>;
    /**
     * Deliver message to topic subscribers
     */
    private deliverToTopic;
    /**
     * Enqueue message for processing
     */
    private enqueueMessage;
    /**
     * Start message processing loop
     */
    private startMessageProcessing;
    /**
     * Process queued messages
     */
    private processMessages;
    /**
     * Start heartbeat mechanism
     */
    private startHeartbeat;
    /**
     * Send heartbeat to all known nodes
     */
    private sendHeartbeat;
    /**
     * Check health of known nodes
     */
    private checkNodeHealth;
    /**
     * Register a node in the network
     */
    registerNode(nodeId: string): void;
    /**
     * Unregister a node from the network
     */
    unregisterNode(nodeId: string): void;
    /**
     * Get protocol status
     */
    getStatus(): {
        nodeId: string;
        knownNodes: number;
        queuedMessages: number;
        pendingResponses: number;
        activeConsensus: number;
        topics: string[];
    };
    /**
     * Shutdown protocol gracefully
     */
    shutdown(): Promise<void>;
}
|
||||
//# sourceMappingURL=coordination-protocol.d.ts.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.d.ts.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"coordination-protocol.d.ts","sourceRoot":"","sources":["coordination-protocol.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH,OAAO,EAAE,YAAY,EAAE,MAAM,QAAQ,CAAC;AAMtC,MAAM,WAAW,OAAO;IACtB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,SAAS,GAAG,UAAU,GAAG,WAAW,GAAG,WAAW,CAAC;IACzD,IAAI,EAAE,MAAM,CAAC;IACb,EAAE,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;IACvB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,GAAG,CAAC;IACb,SAAS,EAAE,MAAM,CAAC;IAClB,GAAG,EAAE,MAAM,CAAC;IACZ,QAAQ,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,iBAAiB;IAChC,EAAE,EAAE,MAAM,CAAC;IACX,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,eAAe,GAAG,iBAAiB,GAAG,oBAAoB,CAAC;IACjE,IAAI,EAAE,GAAG,CAAC;IACV,aAAa,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,MAAM,CAAC;IACjB,KAAK,EAAE,GAAG,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAC5B,MAAM,EAAE,SAAS,GAAG,UAAU,GAAG,UAAU,GAAG,SAAS,CAAC;CACzD;AAED,MAAM,WAAW,WAAW;IAC1B,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC;IACzB,cAAc,EAAE,OAAO,EAAE,CAAC;IAC1B,cAAc,EAAE,MAAM,CAAC;CACxB;AAED,MAAM,WAAW,0BAA0B;IACzC,MAAM,EAAE,MAAM,CAAC;IACf,iBAAiB,EAAE,MAAM,CAAC;IAC1B,cAAc,EAAE,MAAM,CAAC;IACvB,gBAAgB,EAAE,MAAM,CAAC;IACzB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,qBAAqB,EAAE,OAAO,CAAC;IAC/B,YAAY,EAAE,MAAM,EAAE,CAAC;CACxB;AAED,qBAAa,oBAAqB,SAAQ,YAAY;IAgBxC,OAAO,CAAC,MAAM;IAf1B,OAAO,CAAC,YAAY,CAAiB;IACrC,OAAO,CAAC,YAAY,CAAmC;IACvD,OAAO,CAAC,gBAAgB,CAIT;IACf,OAAO,CAAC,kBAAkB,CAA6C;IACvE,OAAO,CAAC,YAAY,CAAuC;IAC3D,OAAO,CAAC,UAAU,CAA0B;IAC5C,OAAO,CAAC,aAAa,CAAkC;IACvD,OAAO,CAAC,cAAc,CAAC,CAAiB;IACxC,OAAO,CAAC,sBAAsB,CAAC,CAAiB;IAChD,OAAO,CAAC,cAAc,CAAK;gBAEP,MAAM,EAAE,0BAA0B;IAKtD;;OAEG;YACW,UAAU;IA6BxB;;OAEG;IACG,WAAW,CACf,EAAE,EAAE,MAAM,EACV,IAAI,EAAE,OAAO,CAAC,MAAM,CAAC,EACrB,OAAO,EAAE,GAAG,EACZ,OAAO,GAAE;QACP,KAAK,CAAC,EAAE,MAAM,CAAC;QACf,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,cAAc,CAAC,EAAE,OAAO,CAAC;KACrB,GACL,OAAO,CAAC,GAAG,CAAC;IA0Cf;;OAEG;IACG,gBAAgB,CACpB,IAAI,EAAE,OAAO,CAAC,MAAM,CAAC,EACrB,OAAO,EAAE,GAAG,EACZ,OAAO,GAAE;QACP,KAAK,CAAC,EAAE,MAAM,CAAC;QACf,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,QAAQ,CAAC,EAAE,M
AAM,CAAC;KACd,GACL,OAAO,CAAC,IAAI,CAAC;IAiBhB;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IA4CrD;;OAEG;YACW,aAAa;IAa3B;;OAEG;IACG,YAAY,CAAC,SAAS,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,GAAG,GAAG,OAAO,CAAC,IAAI,CAAC;IAkB9E;;OAEG;YACW,cAAc;IAa5B;;OAEG;YACW,eAAe;IAY7B;;OAEG;IACG,gBAAgB,CACpB,IAAI,EAAE,iBAAiB,CAAC,MAAM,CAAC,EAC/B,IAAI,EAAE,GAAG,EACT,aAAa,GAAE,MAAiD,GAC/D,OAAO,CAAC,OAAO,CAAC;IA4DnB;;OAEG;YACW,sBAAsB;IAqBpC;;OAEG;YACW,uBAAuB;IA+BrC;;OAEG;YACW,mBAAmB;IAsCjC;;OAEG;IACH,WAAW,CAAC,IAAI,EAAE,MAAM,EAAE,cAAc,GAAE,MAAY,GAAG,IAAI;IAkB7D;;OAEG;IACH,SAAS,CAAC,SAAS,EAAE,MAAM,EAAE,YAAY,EAAE,MAAM,GAAG,IAAI;IAgBxD;;OAEG;IACH,WAAW,CAAC,SAAS,EAAE,MAAM,EAAE,YAAY,EAAE,MAAM,GAAG,IAAI;IAgB1D;;OAEG;IACG,cAAc,CAAC,SAAS,EAAE,MAAM,EAAE,OAAO,EAAE,GAAG,GAAG,OAAO,CAAC,IAAI,CAAC;IAwCpE;;OAEG;IACH,OAAO,CAAC,cAAc;IAetB;;OAEG;IACH,OAAO,CAAC,cAAc;IAoBtB;;OAEG;IACH,OAAO,CAAC,sBAAsB;IAM9B;;OAEG;YACW,eAAe;IAiB7B;;OAEG;IACH,OAAO,CAAC,cAAc;IAOtB;;OAEG;YACW,aAAa;IAQ3B;;OAEG;IACH,OAAO,CAAC,eAAe;IAevB;;OAEG;IACH,YAAY,CAAC,MAAM,EAAE,MAAM,GAAG,IAAI;IASlC;;OAEG;IACH,cAAc,CAAC,MAAM,EAAE,MAAM,GAAG,IAAI;IASpC;;OAEG;IACH,SAAS,IAAI;QACX,MAAM,EAAE,MAAM,CAAC;QACf,UAAU,EAAE,MAAM,CAAC;QACnB,cAAc,EAAE,MAAM,CAAC;QACvB,gBAAgB,EAAE,MAAM,CAAC;QACzB,eAAe,EAAE,MAAM,CAAC;QACxB,MAAM,EAAE,MAAM,EAAE,CAAC;KAClB;IAaD;;OAEG;IACG,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;CAiChC"}
|
||||
546
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.js
vendored
Normal file
546
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.js
vendored
Normal file
@@ -0,0 +1,546 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Coordination Protocol - Inter-agent communication and consensus
|
||||
*
|
||||
* Handles:
|
||||
* - Inter-agent messaging
|
||||
* - Consensus for critical operations
|
||||
* - Event-driven coordination
|
||||
* - Pub/Sub integration
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.CoordinationProtocol = void 0;
|
||||
const events_1 = require("events");
|
||||
const child_process_1 = require("child_process");
|
||||
const util_1 = require("util");
|
||||
const execAsync = (0, util_1.promisify)(child_process_1.exec);
|
||||
class CoordinationProtocol extends events_1.EventEmitter {
|
||||
constructor(config) {
|
||||
super();
|
||||
this.config = config;
|
||||
this.messageQueue = [];
|
||||
this.sentMessages = new Map();
|
||||
this.pendingResponses = new Map();
|
||||
this.consensusProposals = new Map();
|
||||
this.pubSubTopics = new Map();
|
||||
this.knownNodes = new Set();
|
||||
this.lastHeartbeat = new Map();
|
||||
this.messageCounter = 0;
|
||||
this.initialize();
|
||||
}
|
||||
/**
|
||||
* Initialize coordination protocol
|
||||
*/
|
||||
async initialize() {
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Initializing protocol...`);
|
||||
// Initialize pub/sub topics
|
||||
for (const topicName of this.config.pubSubTopics) {
|
||||
this.createTopic(topicName);
|
||||
}
|
||||
// Start heartbeat
|
||||
this.startHeartbeat();
|
||||
// Start message processing
|
||||
this.startMessageProcessing();
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(`npx claude-flow@alpha hooks pre-task --description "Initialize coordination protocol for node ${this.config.nodeId}"`);
|
||||
}
|
||||
catch (error) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Claude-flow hooks not available`);
|
||||
}
|
||||
}
|
||||
this.emit('protocol:initialized');
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Protocol initialized`);
|
||||
}
|
||||
/**
|
||||
* Send message to another node
|
||||
*/
|
||||
async sendMessage(to, type, payload, options = {}) {
|
||||
const message = {
|
||||
id: `msg-${this.config.nodeId}-${this.messageCounter++}`,
|
||||
type,
|
||||
from: this.config.nodeId,
|
||||
to,
|
||||
topic: options.topic,
|
||||
payload,
|
||||
timestamp: Date.now(),
|
||||
ttl: options.ttl || this.config.messageTimeout,
|
||||
priority: options.priority || 0,
|
||||
};
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Sending ${type} message ${message.id} to ${to}`);
|
||||
// Add to queue
|
||||
this.enqueueMessage(message);
|
||||
// Track sent message
|
||||
this.sentMessages.set(message.id, message);
|
||||
// If expecting response, create promise
|
||||
if (options.expectResponse) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const timeout = setTimeout(() => {
|
||||
this.pendingResponses.delete(message.id);
|
||||
reject(new Error(`Message ${message.id} timed out`));
|
||||
}, message.ttl);
|
||||
this.pendingResponses.set(message.id, {
|
||||
resolve,
|
||||
reject,
|
||||
timeout,
|
||||
});
|
||||
});
|
||||
}
|
||||
this.emit('message:sent', message);
|
||||
}
|
||||
/**
|
||||
* Broadcast message to all nodes
|
||||
*/
|
||||
async broadcastMessage(type, payload, options = {}) {
|
||||
const recipients = Array.from(this.knownNodes);
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Broadcasting ${type} message to ${recipients.length} nodes`);
|
||||
for (const recipient of recipients) {
|
||||
await this.sendMessage(recipient, type, payload, {
|
||||
...options,
|
||||
expectResponse: false,
|
||||
});
|
||||
}
|
||||
this.emit('message:broadcast', { type, recipientCount: recipients.length });
|
||||
}
|
||||
/**
|
||||
* Receive and handle message
|
||||
*/
|
||||
async receiveMessage(message) {
|
||||
// Check if message is expired
|
||||
if (Date.now() - message.timestamp > message.ttl) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Received expired message ${message.id}`);
|
||||
return;
|
||||
}
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Received ${message.type} message ${message.id} from ${message.from}`);
|
||||
// Handle different message types
|
||||
switch (message.type) {
|
||||
case 'request':
|
||||
await this.handleRequest(message);
|
||||
break;
|
||||
case 'response':
|
||||
await this.handleResponse(message);
|
||||
break;
|
||||
case 'broadcast':
|
||||
await this.handleBroadcast(message);
|
||||
break;
|
||||
case 'consensus':
|
||||
await this.handleConsensusMessage(message);
|
||||
break;
|
||||
default:
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Unknown message type: ${message.type}`);
|
||||
}
|
||||
// Update last contact time
|
||||
this.lastHeartbeat.set(message.from, Date.now());
|
||||
this.knownNodes.add(message.from);
|
||||
this.emit('message:received', message);
|
||||
}
|
||||
/**
|
||||
* Handle request message
|
||||
*/
|
||||
async handleRequest(message) {
|
||||
this.emit('request:received', message);
|
||||
// Application can handle request and send response
|
||||
// Example auto-response for health checks
|
||||
if (message.payload.type === 'health_check') {
|
||||
await this.sendResponse(message.id, message.from, {
|
||||
status: 'healthy',
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Send response to a request
|
||||
*/
|
||||
async sendResponse(requestId, to, payload) {
|
||||
const response = {
|
||||
id: `resp-${requestId}`,
|
||||
type: 'response',
|
||||
from: this.config.nodeId,
|
||||
to,
|
||||
payload: {
|
||||
requestId,
|
||||
...payload,
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
ttl: this.config.messageTimeout,
|
||||
priority: 1,
|
||||
};
|
||||
await this.sendMessage(to, 'response', response.payload);
|
||||
}
|
||||
/**
|
||||
* Handle response message
|
||||
*/
|
||||
async handleResponse(message) {
|
||||
const requestId = message.payload.requestId;
|
||||
const pending = this.pendingResponses.get(requestId);
|
||||
if (pending) {
|
||||
clearTimeout(pending.timeout);
|
||||
pending.resolve(message.payload);
|
||||
this.pendingResponses.delete(requestId);
|
||||
}
|
||||
this.emit('response:received', message);
|
||||
}
|
||||
/**
|
||||
* Handle broadcast message
|
||||
*/
|
||||
async handleBroadcast(message) {
|
||||
// If message has topic, deliver to topic subscribers
|
||||
if (message.topic) {
|
||||
const topic = this.pubSubTopics.get(message.topic);
|
||||
if (topic) {
|
||||
this.deliverToTopic(message, topic);
|
||||
}
|
||||
}
|
||||
this.emit('broadcast:received', message);
|
||||
}
|
||||
/**
|
||||
* Propose consensus for critical operation
|
||||
*/
|
||||
async proposeConsensus(type, data, requiredVotes = Math.floor(this.knownNodes.size / 2) + 1) {
|
||||
const proposal = {
|
||||
id: `consensus-${this.config.nodeId}-${Date.now()}`,
|
||||
proposer: this.config.nodeId,
|
||||
type,
|
||||
data,
|
||||
requiredVotes,
|
||||
deadline: Date.now() + this.config.consensusTimeout,
|
||||
votes: new Map([[this.config.nodeId, true]]), // Proposer votes yes
|
||||
status: 'pending',
|
||||
};
|
||||
this.consensusProposals.set(proposal.id, proposal);
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Proposing consensus ${proposal.id} (type: ${type})`);
|
||||
// Broadcast consensus proposal
|
||||
await this.broadcastMessage('consensus', {
|
||||
action: 'propose',
|
||||
proposal: {
|
||||
id: proposal.id,
|
||||
proposer: proposal.proposer,
|
||||
type: proposal.type,
|
||||
data: proposal.data,
|
||||
requiredVotes: proposal.requiredVotes,
|
||||
deadline: proposal.deadline,
|
||||
},
|
||||
});
|
||||
// Wait for consensus
|
||||
return new Promise((resolve) => {
|
||||
const checkInterval = setInterval(() => {
|
||||
const currentProposal = this.consensusProposals.get(proposal.id);
|
||||
if (!currentProposal) {
|
||||
clearInterval(checkInterval);
|
||||
resolve(false);
|
||||
return;
|
||||
}
|
||||
if (currentProposal.status === 'accepted') {
|
||||
clearInterval(checkInterval);
|
||||
resolve(true);
|
||||
}
|
||||
else if (currentProposal.status === 'rejected' ||
|
||||
currentProposal.status === 'expired') {
|
||||
clearInterval(checkInterval);
|
||||
resolve(false);
|
||||
}
|
||||
else if (Date.now() > currentProposal.deadline) {
|
||||
currentProposal.status = 'expired';
|
||||
clearInterval(checkInterval);
|
||||
resolve(false);
|
||||
}
|
||||
}, 100);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Handle consensus message
|
||||
*/
|
||||
async handleConsensusMessage(message) {
|
||||
const { action, proposal, vote } = message.payload;
|
||||
switch (action) {
|
||||
case 'propose':
|
||||
// New proposal received
|
||||
await this.handleConsensusProposal(proposal, message.from);
|
||||
break;
|
||||
case 'vote':
|
||||
// Vote received for proposal
|
||||
await this.handleConsensusVote(vote.proposalId, message.from, vote.approve);
|
||||
break;
|
||||
default:
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Unknown consensus action: ${action}`);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Handle consensus proposal
|
||||
*/
|
||||
async handleConsensusProposal(proposalData, from) {
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Received consensus proposal ${proposalData.id} from ${from}`);
|
||||
// Store proposal
|
||||
const proposal = {
|
||||
...proposalData,
|
||||
votes: new Map([[proposalData.proposer, true]]),
|
||||
status: 'pending',
|
||||
};
|
||||
this.consensusProposals.set(proposal.id, proposal);
|
||||
// Emit event for application to decide
|
||||
this.emit('consensus:proposed', proposal);
|
||||
// Auto-approve for demo (in production, application decides)
|
||||
const approve = true;
|
||||
// Send vote
|
||||
await this.sendMessage(proposal.proposer, 'consensus', {
|
||||
action: 'vote',
|
||||
vote: {
|
||||
proposalId: proposal.id,
|
||||
approve,
|
||||
voter: this.config.nodeId,
|
||||
},
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Handle consensus vote
|
||||
*/
|
||||
async handleConsensusVote(proposalId, voter, approve) {
|
||||
const proposal = this.consensusProposals.get(proposalId);
|
||||
if (!proposal || proposal.status !== 'pending') {
|
||||
return;
|
||||
}
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Received ${approve ? 'approval' : 'rejection'} vote from ${voter} for proposal ${proposalId}`);
|
||||
// Record vote
|
||||
proposal.votes.set(voter, approve);
|
||||
// Count votes
|
||||
const approvals = Array.from(proposal.votes.values()).filter(v => v).length;
|
||||
const rejections = proposal.votes.size - approvals;
|
||||
// Check if consensus reached
|
||||
if (approvals >= proposal.requiredVotes) {
|
||||
proposal.status = 'accepted';
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Consensus ${proposalId} accepted (${approvals}/${proposal.requiredVotes} votes)`);
|
||||
this.emit('consensus:accepted', proposal);
|
||||
}
|
||||
else if (rejections > this.knownNodes.size - proposal.requiredVotes) {
|
||||
proposal.status = 'rejected';
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Consensus ${proposalId} rejected (${rejections} rejections)`);
|
||||
this.emit('consensus:rejected', proposal);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Create pub/sub topic
|
||||
*/
|
||||
createTopic(name, maxHistorySize = 100) {
|
||||
if (this.pubSubTopics.has(name)) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Topic ${name} already exists`);
|
||||
return;
|
||||
}
|
||||
const topic = {
|
||||
name,
|
||||
subscribers: new Set(),
|
||||
messageHistory: [],
|
||||
maxHistorySize,
|
||||
};
|
||||
this.pubSubTopics.set(name, topic);
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Created topic: ${name}`);
|
||||
}
|
||||
/**
|
||||
* Subscribe to pub/sub topic
|
||||
*/
|
||||
subscribe(topicName, subscriberId) {
|
||||
const topic = this.pubSubTopics.get(topicName);
|
||||
if (!topic) {
|
||||
throw new Error(`Topic ${topicName} does not exist`);
|
||||
}
|
||||
topic.subscribers.add(subscriberId);
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Node ${subscriberId} subscribed to topic ${topicName}`);
|
||||
this.emit('topic:subscribed', { topicName, subscriberId });
|
||||
}
|
||||
/**
|
||||
* Unsubscribe from pub/sub topic
|
||||
*/
|
||||
unsubscribe(topicName, subscriberId) {
|
||||
const topic = this.pubSubTopics.get(topicName);
|
||||
if (!topic) {
|
||||
return;
|
||||
}
|
||||
topic.subscribers.delete(subscriberId);
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Node ${subscriberId} unsubscribed from topic ${topicName}`);
|
||||
this.emit('topic:unsubscribed', { topicName, subscriberId });
|
||||
}
|
||||
/**
|
||||
* Publish message to topic
|
||||
*/
|
||||
async publishToTopic(topicName, payload) {
|
||||
const topic = this.pubSubTopics.get(topicName);
|
||||
if (!topic) {
|
||||
throw new Error(`Topic ${topicName} does not exist`);
|
||||
}
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Publishing to topic ${topicName} (${topic.subscribers.size} subscribers)`);
|
||||
// Broadcast to all subscribers
|
||||
for (const subscriber of topic.subscribers) {
|
||||
await this.sendMessage(subscriber, 'broadcast', payload, {
|
||||
topic: topicName,
|
||||
});
|
||||
}
|
||||
// Store in message history
|
||||
const message = {
|
||||
id: `topic-${topicName}-${Date.now()}`,
|
||||
type: 'broadcast',
|
||||
from: this.config.nodeId,
|
||||
topic: topicName,
|
||||
payload,
|
||||
timestamp: Date.now(),
|
||||
ttl: this.config.messageTimeout,
|
||||
priority: 0,
|
||||
};
|
||||
topic.messageHistory.push(message);
|
||||
// Trim history if needed
|
||||
if (topic.messageHistory.length > topic.maxHistorySize) {
|
||||
topic.messageHistory.shift();
|
||||
}
|
||||
this.emit('topic:published', { topicName, message });
|
||||
}
|
||||
/**
|
||||
* Deliver message to topic subscribers
|
||||
*/
|
||||
deliverToTopic(message, topic) {
|
||||
// Store in history
|
||||
topic.messageHistory.push(message);
|
||||
if (topic.messageHistory.length > topic.maxHistorySize) {
|
||||
topic.messageHistory.shift();
|
||||
}
|
||||
// Emit to local subscribers
|
||||
this.emit('topic:message', {
|
||||
topicName: topic.name,
|
||||
message,
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Enqueue message for processing
|
||||
*/
|
||||
enqueueMessage(message) {
|
||||
if (this.messageQueue.length >= this.config.maxMessageQueueSize) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Message queue full, dropping lowest priority message`);
|
||||
// Remove lowest priority message
|
||||
this.messageQueue.sort((a, b) => b.priority - a.priority);
|
||||
this.messageQueue.pop();
|
||||
}
|
||||
// Insert message by priority
|
||||
let insertIndex = this.messageQueue.findIndex(m => m.priority < message.priority);
|
||||
if (insertIndex === -1) {
|
||||
this.messageQueue.push(message);
|
||||
}
|
||||
else {
|
||||
this.messageQueue.splice(insertIndex, 0, message);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Start message processing loop
|
||||
*/
|
||||
startMessageProcessing() {
|
||||
this.messageProcessingTimer = setInterval(() => {
|
||||
this.processMessages();
|
||||
}, 10); // Process every 10ms
|
||||
}
|
||||
/**
|
||||
* Process queued messages
|
||||
*/
|
||||
async processMessages() {
|
||||
while (this.messageQueue.length > 0) {
|
||||
const message = this.messageQueue.shift();
|
||||
// Check if message expired
|
||||
if (Date.now() - message.timestamp > message.ttl) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Message ${message.id} expired before processing`);
|
||||
continue;
|
||||
}
|
||||
// Simulate message transmission (replace with actual network call)
|
||||
this.emit('message:transmit', message);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Start heartbeat mechanism
|
||||
*/
|
||||
startHeartbeat() {
|
||||
this.heartbeatTimer = setInterval(() => {
|
||||
this.sendHeartbeat();
|
||||
this.checkNodeHealth();
|
||||
}, this.config.heartbeatInterval);
|
||||
}
|
||||
/**
|
||||
* Send heartbeat to all known nodes
|
||||
*/
|
||||
async sendHeartbeat() {
|
||||
await this.broadcastMessage('request', {
|
||||
type: 'heartbeat',
|
||||
nodeId: this.config.nodeId,
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Check health of known nodes
|
||||
*/
|
||||
checkNodeHealth() {
|
||||
const now = Date.now();
|
||||
const unhealthyThreshold = this.config.heartbeatInterval * 3;
|
||||
for (const [nodeId, lastSeen] of this.lastHeartbeat.entries()) {
|
||||
if (now - lastSeen > unhealthyThreshold) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Node ${nodeId} appears unhealthy (last seen ${Math.floor((now - lastSeen) / 1000)}s ago)`);
|
||||
this.emit('node:unhealthy', { nodeId, lastSeen });
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Register a node in the network
|
||||
*/
|
||||
registerNode(nodeId) {
|
||||
this.knownNodes.add(nodeId);
|
||||
this.lastHeartbeat.set(nodeId, Date.now());
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Registered node: ${nodeId}`);
|
||||
this.emit('node:registered', { nodeId });
|
||||
}
|
||||
/**
|
||||
* Unregister a node from the network
|
||||
*/
|
||||
unregisterNode(nodeId) {
|
||||
this.knownNodes.delete(nodeId);
|
||||
this.lastHeartbeat.delete(nodeId);
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Unregistered node: ${nodeId}`);
|
||||
this.emit('node:unregistered', { nodeId });
|
||||
}
|
||||
/**
|
||||
* Get protocol status
|
||||
*/
|
||||
getStatus() {
|
||||
return {
|
||||
nodeId: this.config.nodeId,
|
||||
knownNodes: this.knownNodes.size,
|
||||
queuedMessages: this.messageQueue.length,
|
||||
pendingResponses: this.pendingResponses.size,
|
||||
activeConsensus: Array.from(this.consensusProposals.values()).filter(p => p.status === 'pending').length,
|
||||
topics: Array.from(this.pubSubTopics.keys()),
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Shutdown protocol gracefully
|
||||
*/
|
||||
async shutdown() {
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Shutting down protocol...`);
|
||||
// Stop timers
|
||||
if (this.heartbeatTimer) {
|
||||
clearInterval(this.heartbeatTimer);
|
||||
}
|
||||
if (this.messageProcessingTimer) {
|
||||
clearInterval(this.messageProcessingTimer);
|
||||
}
|
||||
// Process remaining messages
|
||||
await this.processMessages();
|
||||
// Clear pending responses
|
||||
for (const [messageId, pending] of this.pendingResponses.entries()) {
|
||||
clearTimeout(pending.timeout);
|
||||
pending.reject(new Error('Protocol shutting down'));
|
||||
}
|
||||
this.pendingResponses.clear();
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(`npx claude-flow@alpha hooks post-task --task-id "protocol-${this.config.nodeId}-shutdown"`);
|
||||
}
|
||||
catch (error) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Error executing shutdown hooks`);
|
||||
}
|
||||
}
|
||||
this.emit('protocol:shutdown');
|
||||
}
|
||||
}
|
||||
exports.CoordinationProtocol = CoordinationProtocol;
|
||||
//# sourceMappingURL=coordination-protocol.js.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.js.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
768
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.ts
vendored
Normal file
768
vendor/ruvector/npm/packages/agentic-integration/coordination-protocol.ts
vendored
Normal file
@@ -0,0 +1,768 @@
|
||||
/**
|
||||
* Coordination Protocol - Inter-agent communication and consensus
|
||||
*
|
||||
* Handles:
|
||||
* - Inter-agent messaging
|
||||
* - Consensus for critical operations
|
||||
* - Event-driven coordination
|
||||
* - Pub/Sub integration
|
||||
*/
|
||||
|
||||
import { EventEmitter } from 'events';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
export interface Message {
|
||||
id: string;
|
||||
type: 'request' | 'response' | 'broadcast' | 'consensus';
|
||||
from: string;
|
||||
to?: string | string[]; // Single recipient or multiple for broadcast
|
||||
topic?: string;
|
||||
payload: any;
|
||||
timestamp: number;
|
||||
ttl: number; // Time to live in milliseconds
|
||||
priority: number;
|
||||
}
|
||||
|
||||
export interface ConsensusProposal {
|
||||
id: string;
|
||||
proposer: string;
|
||||
type: 'schema_change' | 'topology_change' | 'critical_operation';
|
||||
data: any;
|
||||
requiredVotes: number;
|
||||
deadline: number;
|
||||
votes: Map<string, boolean>;
|
||||
status: 'pending' | 'accepted' | 'rejected' | 'expired';
|
||||
}
|
||||
|
||||
export interface PubSubTopic {
|
||||
name: string;
|
||||
subscribers: Set<string>;
|
||||
messageHistory: Message[];
|
||||
maxHistorySize: number;
|
||||
}
|
||||
|
||||
export interface CoordinationProtocolConfig {
|
||||
nodeId: string;
|
||||
heartbeatInterval: number;
|
||||
messageTimeout: number;
|
||||
consensusTimeout: number;
|
||||
maxMessageQueueSize: number;
|
||||
enableClaudeFlowHooks: boolean;
|
||||
pubSubTopics: string[];
|
||||
}
|
||||
|
||||
export class CoordinationProtocol extends EventEmitter {
|
||||
private messageQueue: Message[] = [];
|
||||
private sentMessages: Map<string, Message> = new Map();
|
||||
private pendingResponses: Map<string, {
|
||||
resolve: (value: any) => void;
|
||||
reject: (error: Error) => void;
|
||||
timeout: NodeJS.Timeout;
|
||||
}> = new Map();
|
||||
private consensusProposals: Map<string, ConsensusProposal> = new Map();
|
||||
private pubSubTopics: Map<string, PubSubTopic> = new Map();
|
||||
private knownNodes: Set<string> = new Set();
|
||||
private lastHeartbeat: Map<string, number> = new Map();
|
||||
private heartbeatTimer?: NodeJS.Timeout;
|
||||
private messageProcessingTimer?: NodeJS.Timeout;
|
||||
private messageCounter = 0;
|
||||
|
||||
constructor(private config: CoordinationProtocolConfig) {
|
||||
super();
|
||||
this.initialize();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize coordination protocol
|
||||
*/
|
||||
private async initialize(): Promise<void> {
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Initializing protocol...`);
|
||||
|
||||
// Initialize pub/sub topics
|
||||
for (const topicName of this.config.pubSubTopics) {
|
||||
this.createTopic(topicName);
|
||||
}
|
||||
|
||||
// Start heartbeat
|
||||
this.startHeartbeat();
|
||||
|
||||
// Start message processing
|
||||
this.startMessageProcessing();
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks pre-task --description "Initialize coordination protocol for node ${this.config.nodeId}"`
|
||||
);
|
||||
} catch (error) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Claude-flow hooks not available`);
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('protocol:initialized');
|
||||
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Protocol initialized`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send message to another node
|
||||
*/
|
||||
async sendMessage(
|
||||
to: string,
|
||||
type: Message['type'],
|
||||
payload: any,
|
||||
options: {
|
||||
topic?: string;
|
||||
ttl?: number;
|
||||
priority?: number;
|
||||
expectResponse?: boolean;
|
||||
} = {}
|
||||
): Promise<any> {
|
||||
const message: Message = {
|
||||
id: `msg-${this.config.nodeId}-${this.messageCounter++}`,
|
||||
type,
|
||||
from: this.config.nodeId,
|
||||
to,
|
||||
topic: options.topic,
|
||||
payload,
|
||||
timestamp: Date.now(),
|
||||
ttl: options.ttl || this.config.messageTimeout,
|
||||
priority: options.priority || 0,
|
||||
};
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Sending ${type} message ${message.id} to ${to}`
|
||||
);
|
||||
|
||||
// Add to queue
|
||||
this.enqueueMessage(message);
|
||||
|
||||
// Track sent message
|
||||
this.sentMessages.set(message.id, message);
|
||||
|
||||
// If expecting response, create promise
|
||||
if (options.expectResponse) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const timeout = setTimeout(() => {
|
||||
this.pendingResponses.delete(message.id);
|
||||
reject(new Error(`Message ${message.id} timed out`));
|
||||
}, message.ttl);
|
||||
|
||||
this.pendingResponses.set(message.id, {
|
||||
resolve,
|
||||
reject,
|
||||
timeout,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
this.emit('message:sent', message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Broadcast message to all nodes
|
||||
*/
|
||||
async broadcastMessage(
|
||||
type: Message['type'],
|
||||
payload: any,
|
||||
options: {
|
||||
topic?: string;
|
||||
ttl?: number;
|
||||
priority?: number;
|
||||
} = {}
|
||||
): Promise<void> {
|
||||
const recipients = Array.from(this.knownNodes);
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Broadcasting ${type} message to ${recipients.length} nodes`
|
||||
);
|
||||
|
||||
for (const recipient of recipients) {
|
||||
await this.sendMessage(recipient, type, payload, {
|
||||
...options,
|
||||
expectResponse: false,
|
||||
});
|
||||
}
|
||||
|
||||
this.emit('message:broadcast', { type, recipientCount: recipients.length });
|
||||
}
|
||||
|
||||
/**
|
||||
* Receive and handle message
|
||||
*/
|
||||
async receiveMessage(message: Message): Promise<void> {
|
||||
// Check if message is expired
|
||||
if (Date.now() - message.timestamp > message.ttl) {
|
||||
console.warn(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Received expired message ${message.id}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Received ${message.type} message ${message.id} from ${message.from}`
|
||||
);
|
||||
|
||||
// Handle different message types
|
||||
switch (message.type) {
|
||||
case 'request':
|
||||
await this.handleRequest(message);
|
||||
break;
|
||||
|
||||
case 'response':
|
||||
await this.handleResponse(message);
|
||||
break;
|
||||
|
||||
case 'broadcast':
|
||||
await this.handleBroadcast(message);
|
||||
break;
|
||||
|
||||
case 'consensus':
|
||||
await this.handleConsensusMessage(message);
|
||||
break;
|
||||
|
||||
default:
|
||||
console.warn(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Unknown message type: ${message.type}`
|
||||
);
|
||||
}
|
||||
|
||||
// Update last contact time
|
||||
this.lastHeartbeat.set(message.from, Date.now());
|
||||
this.knownNodes.add(message.from);
|
||||
|
||||
this.emit('message:received', message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle request message
|
||||
*/
|
||||
private async handleRequest(message: Message): Promise<void> {
|
||||
this.emit('request:received', message);
|
||||
|
||||
// Application can handle request and send response
|
||||
// Example auto-response for health checks
|
||||
if (message.payload.type === 'health_check') {
|
||||
await this.sendResponse(message.id, message.from, {
|
||||
status: 'healthy',
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Send response to a request
|
||||
*/
|
||||
async sendResponse(requestId: string, to: string, payload: any): Promise<void> {
|
||||
const response: Message = {
|
||||
id: `resp-${requestId}`,
|
||||
type: 'response',
|
||||
from: this.config.nodeId,
|
||||
to,
|
||||
payload: {
|
||||
requestId,
|
||||
...payload,
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
ttl: this.config.messageTimeout,
|
||||
priority: 1,
|
||||
};
|
||||
|
||||
await this.sendMessage(to, 'response', response.payload);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle response message
|
||||
*/
|
||||
private async handleResponse(message: Message): Promise<void> {
|
||||
const requestId = message.payload.requestId;
|
||||
const pending = this.pendingResponses.get(requestId);
|
||||
|
||||
if (pending) {
|
||||
clearTimeout(pending.timeout);
|
||||
pending.resolve(message.payload);
|
||||
this.pendingResponses.delete(requestId);
|
||||
}
|
||||
|
||||
this.emit('response:received', message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle broadcast message
|
||||
*/
|
||||
private async handleBroadcast(message: Message): Promise<void> {
|
||||
// If message has topic, deliver to topic subscribers
|
||||
if (message.topic) {
|
||||
const topic = this.pubSubTopics.get(message.topic);
|
||||
if (topic) {
|
||||
this.deliverToTopic(message, topic);
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('broadcast:received', message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Propose consensus for critical operation
|
||||
*/
|
||||
async proposeConsensus(
|
||||
type: ConsensusProposal['type'],
|
||||
data: any,
|
||||
requiredVotes: number = Math.floor(this.knownNodes.size / 2) + 1
|
||||
): Promise<boolean> {
|
||||
const proposal: ConsensusProposal = {
|
||||
id: `consensus-${this.config.nodeId}-${Date.now()}`,
|
||||
proposer: this.config.nodeId,
|
||||
type,
|
||||
data,
|
||||
requiredVotes,
|
||||
deadline: Date.now() + this.config.consensusTimeout,
|
||||
votes: new Map([[this.config.nodeId, true]]), // Proposer votes yes
|
||||
status: 'pending',
|
||||
};
|
||||
|
||||
this.consensusProposals.set(proposal.id, proposal);
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Proposing consensus ${proposal.id} (type: ${type})`
|
||||
);
|
||||
|
||||
// Broadcast consensus proposal
|
||||
await this.broadcastMessage('consensus', {
|
||||
action: 'propose',
|
||||
proposal: {
|
||||
id: proposal.id,
|
||||
proposer: proposal.proposer,
|
||||
type: proposal.type,
|
||||
data: proposal.data,
|
||||
requiredVotes: proposal.requiredVotes,
|
||||
deadline: proposal.deadline,
|
||||
},
|
||||
});
|
||||
|
||||
// Wait for consensus
|
||||
return new Promise((resolve) => {
|
||||
const checkInterval = setInterval(() => {
|
||||
const currentProposal = this.consensusProposals.get(proposal.id);
|
||||
|
||||
if (!currentProposal) {
|
||||
clearInterval(checkInterval);
|
||||
resolve(false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (currentProposal.status === 'accepted') {
|
||||
clearInterval(checkInterval);
|
||||
resolve(true);
|
||||
} else if (
|
||||
currentProposal.status === 'rejected' ||
|
||||
currentProposal.status === 'expired'
|
||||
) {
|
||||
clearInterval(checkInterval);
|
||||
resolve(false);
|
||||
} else if (Date.now() > currentProposal.deadline) {
|
||||
currentProposal.status = 'expired';
|
||||
clearInterval(checkInterval);
|
||||
resolve(false);
|
||||
}
|
||||
}, 100);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle consensus message
|
||||
*/
|
||||
private async handleConsensusMessage(message: Message): Promise<void> {
|
||||
const { action, proposal, vote } = message.payload;
|
||||
|
||||
switch (action) {
|
||||
case 'propose':
|
||||
// New proposal received
|
||||
await this.handleConsensusProposal(proposal, message.from);
|
||||
break;
|
||||
|
||||
case 'vote':
|
||||
// Vote received for proposal
|
||||
await this.handleConsensusVote(vote.proposalId, message.from, vote.approve);
|
||||
break;
|
||||
|
||||
default:
|
||||
console.warn(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Unknown consensus action: ${action}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle consensus proposal
|
||||
*/
|
||||
private async handleConsensusProposal(proposalData: any, from: string): Promise<void> {
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Received consensus proposal ${proposalData.id} from ${from}`
|
||||
);
|
||||
|
||||
// Store proposal
|
||||
const proposal: ConsensusProposal = {
|
||||
...proposalData,
|
||||
votes: new Map([[proposalData.proposer, true]]),
|
||||
status: 'pending' as const,
|
||||
};
|
||||
|
||||
this.consensusProposals.set(proposal.id, proposal);
|
||||
|
||||
// Emit event for application to decide
|
||||
this.emit('consensus:proposed', proposal);
|
||||
|
||||
// Auto-approve for demo (in production, application decides)
|
||||
const approve = true;
|
||||
|
||||
// Send vote
|
||||
await this.sendMessage(proposal.proposer, 'consensus', {
|
||||
action: 'vote',
|
||||
vote: {
|
||||
proposalId: proposal.id,
|
||||
approve,
|
||||
voter: this.config.nodeId,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle consensus vote
|
||||
*/
|
||||
private async handleConsensusVote(
|
||||
proposalId: string,
|
||||
voter: string,
|
||||
approve: boolean
|
||||
): Promise<void> {
|
||||
const proposal = this.consensusProposals.get(proposalId);
|
||||
|
||||
if (!proposal || proposal.status !== 'pending') {
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Received ${approve ? 'approval' : 'rejection'} vote from ${voter} for proposal ${proposalId}`
|
||||
);
|
||||
|
||||
// Record vote
|
||||
proposal.votes.set(voter, approve);
|
||||
|
||||
// Count votes
|
||||
const approvals = Array.from(proposal.votes.values()).filter(v => v).length;
|
||||
const rejections = proposal.votes.size - approvals;
|
||||
|
||||
// Check if consensus reached
|
||||
if (approvals >= proposal.requiredVotes) {
|
||||
proposal.status = 'accepted';
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Consensus ${proposalId} accepted (${approvals}/${proposal.requiredVotes} votes)`
|
||||
);
|
||||
this.emit('consensus:accepted', proposal);
|
||||
} else if (rejections > this.knownNodes.size - proposal.requiredVotes) {
|
||||
proposal.status = 'rejected';
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Consensus ${proposalId} rejected (${rejections} rejections)`
|
||||
);
|
||||
this.emit('consensus:rejected', proposal);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create pub/sub topic
|
||||
*/
|
||||
createTopic(name: string, maxHistorySize: number = 100): void {
|
||||
if (this.pubSubTopics.has(name)) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Topic ${name} already exists`);
|
||||
return;
|
||||
}
|
||||
|
||||
const topic: PubSubTopic = {
|
||||
name,
|
||||
subscribers: new Set(),
|
||||
messageHistory: [],
|
||||
maxHistorySize,
|
||||
};
|
||||
|
||||
this.pubSubTopics.set(name, topic);
|
||||
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Created topic: ${name}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Subscribe to pub/sub topic
|
||||
*/
|
||||
subscribe(topicName: string, subscriberId: string): void {
|
||||
const topic = this.pubSubTopics.get(topicName);
|
||||
|
||||
if (!topic) {
|
||||
throw new Error(`Topic ${topicName} does not exist`);
|
||||
}
|
||||
|
||||
topic.subscribers.add(subscriberId);
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Node ${subscriberId} subscribed to topic ${topicName}`
|
||||
);
|
||||
|
||||
this.emit('topic:subscribed', { topicName, subscriberId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Unsubscribe from pub/sub topic
|
||||
*/
|
||||
unsubscribe(topicName: string, subscriberId: string): void {
|
||||
const topic = this.pubSubTopics.get(topicName);
|
||||
|
||||
if (!topic) {
|
||||
return;
|
||||
}
|
||||
|
||||
topic.subscribers.delete(subscriberId);
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Node ${subscriberId} unsubscribed from topic ${topicName}`
|
||||
);
|
||||
|
||||
this.emit('topic:unsubscribed', { topicName, subscriberId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Publish message to topic
|
||||
*/
|
||||
async publishToTopic(topicName: string, payload: any): Promise<void> {
|
||||
const topic = this.pubSubTopics.get(topicName);
|
||||
|
||||
if (!topic) {
|
||||
throw new Error(`Topic ${topicName} does not exist`);
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Publishing to topic ${topicName} (${topic.subscribers.size} subscribers)`
|
||||
);
|
||||
|
||||
// Broadcast to all subscribers
|
||||
for (const subscriber of topic.subscribers) {
|
||||
await this.sendMessage(subscriber, 'broadcast', payload, {
|
||||
topic: topicName,
|
||||
});
|
||||
}
|
||||
|
||||
// Store in message history
|
||||
const message: Message = {
|
||||
id: `topic-${topicName}-${Date.now()}`,
|
||||
type: 'broadcast',
|
||||
from: this.config.nodeId,
|
||||
topic: topicName,
|
||||
payload,
|
||||
timestamp: Date.now(),
|
||||
ttl: this.config.messageTimeout,
|
||||
priority: 0,
|
||||
};
|
||||
|
||||
topic.messageHistory.push(message);
|
||||
|
||||
// Trim history if needed
|
||||
if (topic.messageHistory.length > topic.maxHistorySize) {
|
||||
topic.messageHistory.shift();
|
||||
}
|
||||
|
||||
this.emit('topic:published', { topicName, message });
|
||||
}
|
||||
|
||||
/**
|
||||
* Deliver message to topic subscribers
|
||||
*/
|
||||
private deliverToTopic(message: Message, topic: PubSubTopic): void {
|
||||
// Store in history
|
||||
topic.messageHistory.push(message);
|
||||
|
||||
if (topic.messageHistory.length > topic.maxHistorySize) {
|
||||
topic.messageHistory.shift();
|
||||
}
|
||||
|
||||
// Emit to local subscribers
|
||||
this.emit('topic:message', {
|
||||
topicName: topic.name,
|
||||
message,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Enqueue message for processing
|
||||
*/
|
||||
private enqueueMessage(message: Message): void {
|
||||
if (this.messageQueue.length >= this.config.maxMessageQueueSize) {
|
||||
console.warn(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Message queue full, dropping lowest priority message`
|
||||
);
|
||||
|
||||
// Remove lowest priority message
|
||||
this.messageQueue.sort((a, b) => b.priority - a.priority);
|
||||
this.messageQueue.pop();
|
||||
}
|
||||
|
||||
// Insert message by priority
|
||||
let insertIndex = this.messageQueue.findIndex(m => m.priority < message.priority);
|
||||
if (insertIndex === -1) {
|
||||
this.messageQueue.push(message);
|
||||
} else {
|
||||
this.messageQueue.splice(insertIndex, 0, message);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start message processing loop
|
||||
*/
|
||||
private startMessageProcessing(): void {
|
||||
this.messageProcessingTimer = setInterval(() => {
|
||||
this.processMessages();
|
||||
}, 10); // Process every 10ms
|
||||
}
|
||||
|
||||
/**
|
||||
* Process queued messages
|
||||
*/
|
||||
private async processMessages(): Promise<void> {
|
||||
while (this.messageQueue.length > 0) {
|
||||
const message = this.messageQueue.shift()!;
|
||||
|
||||
// Check if message expired
|
||||
if (Date.now() - message.timestamp > message.ttl) {
|
||||
console.warn(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Message ${message.id} expired before processing`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Simulate message transmission (replace with actual network call)
|
||||
this.emit('message:transmit', message);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start heartbeat mechanism
|
||||
*/
|
||||
private startHeartbeat(): void {
|
||||
this.heartbeatTimer = setInterval(() => {
|
||||
this.sendHeartbeat();
|
||||
this.checkNodeHealth();
|
||||
}, this.config.heartbeatInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send heartbeat to all known nodes
|
||||
*/
|
||||
private async sendHeartbeat(): Promise<void> {
|
||||
await this.broadcastMessage('request', {
|
||||
type: 'heartbeat',
|
||||
nodeId: this.config.nodeId,
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check health of known nodes
|
||||
*/
|
||||
private checkNodeHealth(): void {
|
||||
const now = Date.now();
|
||||
const unhealthyThreshold = this.config.heartbeatInterval * 3;
|
||||
|
||||
for (const [nodeId, lastSeen] of this.lastHeartbeat.entries()) {
|
||||
if (now - lastSeen > unhealthyThreshold) {
|
||||
console.warn(
|
||||
`[CoordinationProtocol:${this.config.nodeId}] Node ${nodeId} appears unhealthy (last seen ${Math.floor((now - lastSeen) / 1000)}s ago)`
|
||||
);
|
||||
|
||||
this.emit('node:unhealthy', { nodeId, lastSeen });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a node in the network
|
||||
*/
|
||||
registerNode(nodeId: string): void {
|
||||
this.knownNodes.add(nodeId);
|
||||
this.lastHeartbeat.set(nodeId, Date.now());
|
||||
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Registered node: ${nodeId}`);
|
||||
|
||||
this.emit('node:registered', { nodeId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Unregister a node from the network
|
||||
*/
|
||||
unregisterNode(nodeId: string): void {
|
||||
this.knownNodes.delete(nodeId);
|
||||
this.lastHeartbeat.delete(nodeId);
|
||||
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Unregistered node: ${nodeId}`);
|
||||
|
||||
this.emit('node:unregistered', { nodeId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get protocol status
|
||||
*/
|
||||
getStatus(): {
|
||||
nodeId: string;
|
||||
knownNodes: number;
|
||||
queuedMessages: number;
|
||||
pendingResponses: number;
|
||||
activeConsensus: number;
|
||||
topics: string[];
|
||||
} {
|
||||
return {
|
||||
nodeId: this.config.nodeId,
|
||||
knownNodes: this.knownNodes.size,
|
||||
queuedMessages: this.messageQueue.length,
|
||||
pendingResponses: this.pendingResponses.size,
|
||||
activeConsensus: Array.from(this.consensusProposals.values()).filter(
|
||||
p => p.status === 'pending'
|
||||
).length,
|
||||
topics: Array.from(this.pubSubTopics.keys()),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown protocol gracefully
|
||||
*/
|
||||
async shutdown(): Promise<void> {
|
||||
console.log(`[CoordinationProtocol:${this.config.nodeId}] Shutting down protocol...`);
|
||||
|
||||
// Stop timers
|
||||
if (this.heartbeatTimer) {
|
||||
clearInterval(this.heartbeatTimer);
|
||||
}
|
||||
if (this.messageProcessingTimer) {
|
||||
clearInterval(this.messageProcessingTimer);
|
||||
}
|
||||
|
||||
// Process remaining messages
|
||||
await this.processMessages();
|
||||
|
||||
// Clear pending responses
|
||||
for (const [messageId, pending] of this.pendingResponses.entries()) {
|
||||
clearTimeout(pending.timeout);
|
||||
pending.reject(new Error('Protocol shutting down'));
|
||||
}
|
||||
this.pendingResponses.clear();
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks post-task --task-id "protocol-${this.config.nodeId}-shutdown"`
|
||||
);
|
||||
} catch (error) {
|
||||
console.warn(`[CoordinationProtocol:${this.config.nodeId}] Error executing shutdown hooks`);
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('protocol:shutdown');
|
||||
}
|
||||
}
|
||||
11
vendor/ruvector/npm/packages/agentic-integration/integration-tests.d.ts
vendored
Normal file
11
vendor/ruvector/npm/packages/agentic-integration/integration-tests.d.ts
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
/**
|
||||
* Integration Tests - Comprehensive tests for agentic coordination
|
||||
*
|
||||
* Tests:
|
||||
* - Multi-agent coordination
|
||||
* - Failover scenarios
|
||||
* - Load distribution
|
||||
* - Performance benchmarks
|
||||
*/
|
||||
export {};
|
||||
//# sourceMappingURL=integration-tests.d.ts.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/integration-tests.d.ts.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/integration-tests.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"integration-tests.d.ts","sourceRoot":"","sources":["integration-tests.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG"}
|
||||
669
vendor/ruvector/npm/packages/agentic-integration/integration-tests.js
vendored
Normal file
669
vendor/ruvector/npm/packages/agentic-integration/integration-tests.js
vendored
Normal file
@@ -0,0 +1,669 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Integration Tests - Comprehensive tests for agentic coordination
|
||||
*
|
||||
* Tests:
|
||||
* - Multi-agent coordination
|
||||
* - Failover scenarios
|
||||
* - Load distribution
|
||||
* - Performance benchmarks
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const agent_coordinator_1 = require("./agent-coordinator");
|
||||
const regional_agent_1 = require("./regional-agent");
|
||||
const swarm_manager_1 = require("./swarm-manager");
|
||||
const coordination_protocol_1 = require("./coordination-protocol");
|
||||
/**
|
||||
* Test utilities
|
||||
*/
|
||||
class TestUtils {
|
||||
static async sleep(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
static generateRandomVector(dimensions) {
|
||||
return Array.from({ length: dimensions }, () => Math.random());
|
||||
}
|
||||
static async measureLatency(fn) {
|
||||
const start = Date.now();
|
||||
const result = await fn();
|
||||
const latency = Date.now() - start;
|
||||
return { result, latency };
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Test Suite 1: Agent Coordinator Tests
|
||||
*/
|
||||
describe('AgentCoordinator', () => {
|
||||
let coordinator;
|
||||
beforeEach(() => {
|
||||
const config = {
|
||||
maxAgentsPerRegion: 10,
|
||||
healthCheckInterval: 5000,
|
||||
taskTimeout: 10000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 5000,
|
||||
loadBalancingStrategy: 'round-robin',
|
||||
failoverThreshold: 3,
|
||||
enableClaudeFlowHooks: false, // Disable for testing
|
||||
};
|
||||
coordinator = new agent_coordinator_1.AgentCoordinator(config);
|
||||
});
|
||||
afterEach(async () => {
|
||||
await coordinator.shutdown();
|
||||
});
|
||||
test('should register agents successfully', async () => {
|
||||
const registration = {
|
||||
agentId: 'test-agent-1',
|
||||
region: 'us-east',
|
||||
endpoint: 'https://us-east.ruvector.io/agent/test-agent-1',
|
||||
capabilities: ['query', 'index'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
};
|
||||
await coordinator.registerAgent(registration);
|
||||
const status = coordinator.getStatus();
|
||||
expect(status.totalAgents).toBe(1);
|
||||
expect(status.regionDistribution['us-east']).toBe(1);
|
||||
});
|
||||
test('should distribute tasks using round-robin', async () => {
|
||||
// Register multiple agents
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await coordinator.registerAgent({
|
||||
agentId: `agent-${i}`,
|
||||
region: 'us-east',
|
||||
endpoint: `https://us-east.ruvector.io/agent/agent-${i}`,
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
}
|
||||
// Submit tasks
|
||||
const taskIds = [];
|
||||
for (let i = 0; i < 6; i++) {
|
||||
const taskId = await coordinator.submitTask({
|
||||
type: 'query',
|
||||
payload: { query: `test-query-${i}` },
|
||||
priority: 1,
|
||||
maxRetries: 3,
|
||||
});
|
||||
taskIds.push(taskId);
|
||||
}
|
||||
expect(taskIds.length).toBe(6);
|
||||
await TestUtils.sleep(1000);
|
||||
const status = coordinator.getStatus();
|
||||
expect(status.queuedTasks + status.activeTasks).toBeGreaterThan(0);
|
||||
});
|
||||
test('should handle agent failures with circuit breaker', async () => {
|
||||
const registration = {
|
||||
agentId: 'failing-agent',
|
||||
region: 'us-west',
|
||||
endpoint: 'https://us-west.ruvector.io/agent/failing-agent',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
};
|
||||
await coordinator.registerAgent(registration);
|
||||
// Simulate agent going unhealthy
|
||||
coordinator.updateAgentMetrics({
|
||||
agentId: 'failing-agent',
|
||||
region: 'us-west',
|
||||
cpuUsage: 95,
|
||||
memoryUsage: 95,
|
||||
activeStreams: 1000,
|
||||
queryLatency: 5000,
|
||||
timestamp: Date.now(),
|
||||
healthy: false,
|
||||
});
|
||||
const status = coordinator.getStatus();
|
||||
expect(status.healthyAgents).toBe(0);
|
||||
});
|
||||
test('should enforce max agents per region', async () => {
|
||||
const config = {
|
||||
maxAgentsPerRegion: 2,
|
||||
healthCheckInterval: 5000,
|
||||
taskTimeout: 10000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 5000,
|
||||
loadBalancingStrategy: 'round-robin',
|
||||
failoverThreshold: 3,
|
||||
enableClaudeFlowHooks: false,
|
||||
};
|
||||
const limitedCoordinator = new agent_coordinator_1.AgentCoordinator(config);
|
||||
// Register agents
|
||||
await limitedCoordinator.registerAgent({
|
||||
agentId: 'agent-1',
|
||||
region: 'eu-west',
|
||||
endpoint: 'https://eu-west.ruvector.io/agent/agent-1',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
await limitedCoordinator.registerAgent({
|
||||
agentId: 'agent-2',
|
||||
region: 'eu-west',
|
||||
endpoint: 'https://eu-west.ruvector.io/agent/agent-2',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
// Third agent should fail
|
||||
await expect(limitedCoordinator.registerAgent({
|
||||
agentId: 'agent-3',
|
||||
region: 'eu-west',
|
||||
endpoint: 'https://eu-west.ruvector.io/agent/agent-3',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
})).rejects.toThrow('has reached max agent capacity');
|
||||
await limitedCoordinator.shutdown();
|
||||
});
|
||||
});
|
||||
/**
|
||||
* Test Suite 2: Regional Agent Tests
|
||||
*/
|
||||
describe('RegionalAgent', () => {
|
||||
let agent;
|
||||
beforeEach(() => {
|
||||
const config = {
|
||||
agentId: 'test-agent-us-east-1',
|
||||
region: 'us-east',
|
||||
coordinatorEndpoint: 'coordinator.ruvector.io',
|
||||
localStoragePath: '/tmp/test-agent',
|
||||
maxConcurrentStreams: 100,
|
||||
metricsReportInterval: 5000,
|
||||
syncInterval: 2000,
|
||||
enableClaudeFlowHooks: false,
|
||||
vectorDimensions: 768,
|
||||
capabilities: ['query', 'index', 'sync'],
|
||||
};
|
||||
agent = new regional_agent_1.RegionalAgent(config);
|
||||
});
|
||||
afterEach(async () => {
|
||||
await agent.shutdown();
|
||||
});
|
||||
test('should process query successfully', async () => {
|
||||
// Index some vectors
|
||||
await agent.indexVectors([
|
||||
{
|
||||
id: 'vec-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'test' },
|
||||
},
|
||||
{
|
||||
id: 'vec-2',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'test' },
|
||||
},
|
||||
]);
|
||||
// Query
|
||||
const result = await agent.processQuery({
|
||||
id: 'query-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 2,
|
||||
timeout: 5000,
|
||||
});
|
||||
expect(result.matches.length).toBeGreaterThan(0);
|
||||
expect(result.region).toBe('us-east');
|
||||
expect(result.latency).toBeGreaterThan(0);
|
||||
});
|
||||
test('should validate query dimensions', async () => {
|
||||
await expect(agent.processQuery({
|
||||
id: 'query-invalid',
|
||||
vector: TestUtils.generateRandomVector(512), // Wrong dimension
|
||||
topK: 10,
|
||||
timeout: 5000,
|
||||
})).rejects.toThrow('Invalid vector dimensions');
|
||||
});
|
||||
test('should apply filters in query', async () => {
|
||||
// Index vectors with different metadata
|
||||
await agent.indexVectors([
|
||||
{
|
||||
id: 'vec-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'A', type: 'test' },
|
||||
},
|
||||
{
|
||||
id: 'vec-2',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'B', type: 'test' },
|
||||
},
|
||||
{
|
||||
id: 'vec-3',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'A', type: 'prod' },
|
||||
},
|
||||
]);
|
||||
// Query with filter
|
||||
const result = await agent.processQuery({
|
||||
id: 'query-filtered',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 10,
|
||||
filters: { category: 'A' },
|
||||
timeout: 5000,
|
||||
});
|
||||
// Should only return vectors with category 'A'
|
||||
expect(result.matches.length).toBeGreaterThan(0);
|
||||
});
|
||||
test('should enforce rate limiting', async () => {
|
||||
// Try to exceed max concurrent streams
|
||||
const promises = [];
|
||||
for (let i = 0; i < 150; i++) {
|
||||
promises.push(agent.processQuery({
|
||||
id: `query-${i}`,
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 5,
|
||||
timeout: 5000,
|
||||
}).catch(err => err));
|
||||
}
|
||||
const results = await Promise.all(promises);
|
||||
const rateLimitErrors = results.filter(r => r instanceof Error && r.message.includes('Rate limit'));
|
||||
expect(rateLimitErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
test('should handle sync payloads from other regions', async () => {
|
||||
const syncPayload = {
|
||||
type: 'index',
|
||||
data: [
|
||||
{
|
||||
id: 'sync-vec-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { synced: true },
|
||||
},
|
||||
],
|
||||
timestamp: Date.now(),
|
||||
sourceRegion: 'us-west',
|
||||
};
|
||||
await agent.handleSyncPayload(syncPayload);
|
||||
const status = agent.getStatus();
|
||||
expect(status.indexSize).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
/**
|
||||
* Test Suite 3: Swarm Manager Tests
|
||||
*/
|
||||
describe('SwarmManager', () => {
|
||||
let coordinator;
|
||||
let swarmManager;
|
||||
beforeEach(() => {
|
||||
const coordinatorConfig = {
|
||||
maxAgentsPerRegion: 10,
|
||||
healthCheckInterval: 5000,
|
||||
taskTimeout: 10000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 5000,
|
||||
loadBalancingStrategy: 'adaptive',
|
||||
failoverThreshold: 3,
|
||||
enableClaudeFlowHooks: false,
|
||||
};
|
||||
coordinator = new agent_coordinator_1.AgentCoordinator(coordinatorConfig);
|
||||
const swarmConfig = {
|
||||
topology: 'mesh',
|
||||
minAgentsPerRegion: 1,
|
||||
maxAgentsPerRegion: 5,
|
||||
scaleUpThreshold: 80,
|
||||
scaleDownThreshold: 20,
|
||||
scaleUpCooldown: 30000,
|
||||
scaleDownCooldown: 60000,
|
||||
healthCheckInterval: 5000,
|
||||
enableAutoScaling: true,
|
||||
enableClaudeFlowHooks: false,
|
||||
regions: ['us-east', 'us-west', 'eu-west'],
|
||||
};
|
||||
swarmManager = new swarm_manager_1.SwarmManager(swarmConfig, coordinator);
|
||||
});
|
||||
afterEach(async () => {
|
||||
await swarmManager.shutdown();
|
||||
await coordinator.shutdown();
|
||||
});
|
||||
test('should spawn initial agents for all regions', async () => {
|
||||
await TestUtils.sleep(1000); // Wait for initialization
|
||||
const status = swarmManager.getStatus();
|
||||
expect(status.totalAgents).toBeGreaterThanOrEqual(3); // At least 1 per region
|
||||
expect(Object.keys(status.metrics.regionMetrics).length).toBe(3);
|
||||
});
|
||||
test('should spawn additional agents in specific region', async () => {
|
||||
const initialStatus = swarmManager.getStatus();
|
||||
const initialCount = initialStatus.totalAgents;
|
||||
await swarmManager.spawnAgent('us-east');
|
||||
const newStatus = swarmManager.getStatus();
|
||||
expect(newStatus.totalAgents).toBe(initialCount + 1);
|
||||
});
|
||||
test('should calculate swarm metrics correctly', async () => {
|
||||
await TestUtils.sleep(1000);
|
||||
const metrics = swarmManager.calculateSwarmMetrics();
|
||||
expect(metrics.totalAgents).toBeGreaterThan(0);
|
||||
expect(metrics.regionMetrics).toBeDefined();
|
||||
expect(Object.keys(metrics.regionMetrics).length).toBe(3);
|
||||
for (const region of ['us-east', 'us-west', 'eu-west']) {
|
||||
expect(metrics.regionMetrics[region]).toBeDefined();
|
||||
expect(metrics.regionMetrics[region].agentCount).toBeGreaterThan(0);
|
||||
}
|
||||
});
|
||||
test('should despawn agent and redistribute tasks', async () => {
|
||||
await TestUtils.sleep(1000);
|
||||
const status = swarmManager.getStatus();
|
||||
const agentIds = Object.keys(status.metrics.regionMetrics);
|
||||
if (agentIds.length > 0) {
|
||||
const initialCount = status.totalAgents;
|
||||
// Get first agent ID from any region
|
||||
const regionMetrics = Object.values(status.metrics.regionMetrics);
|
||||
const firstRegion = regionMetrics[0];
|
||||
// We'll need to track spawned agents to despawn them
|
||||
// For now, just verify the mechanism works
|
||||
expect(initialCount).toBeGreaterThan(0);
|
||||
}
|
||||
});
|
||||
});
|
||||
/**
|
||||
* Test Suite 4: Coordination Protocol Tests
|
||||
*/
|
||||
describe('CoordinationProtocol', () => {
|
||||
let protocol1;
|
||||
let protocol2;
|
||||
beforeEach(() => {
|
||||
const config1 = {
|
||||
nodeId: 'node-1',
|
||||
heartbeatInterval: 2000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: ['sync', 'metrics', 'alerts'],
|
||||
};
|
||||
const config2 = {
|
||||
nodeId: 'node-2',
|
||||
heartbeatInterval: 2000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: ['sync', 'metrics', 'alerts'],
|
||||
};
|
||||
protocol1 = new coordination_protocol_1.CoordinationProtocol(config1);
|
||||
protocol2 = new coordination_protocol_1.CoordinationProtocol(config2);
|
||||
// Connect protocols
|
||||
protocol1.registerNode('node-2');
|
||||
protocol2.registerNode('node-1');
|
||||
// Set up message forwarding
|
||||
protocol1.on('message:transmit', (message) => {
|
||||
if (message.to === 'node-2' || !message.to) {
|
||||
protocol2.receiveMessage(message);
|
||||
}
|
||||
});
|
||||
protocol2.on('message:transmit', (message) => {
|
||||
if (message.to === 'node-1' || !message.to) {
|
||||
protocol1.receiveMessage(message);
|
||||
}
|
||||
});
|
||||
});
|
||||
afterEach(async () => {
|
||||
await protocol1.shutdown();
|
||||
await protocol2.shutdown();
|
||||
});
|
||||
test('should send and receive messages between nodes', async () => {
|
||||
let receivedMessage = false;
|
||||
protocol2.on('request:received', (message) => {
|
||||
receivedMessage = true;
|
||||
expect(message.from).toBe('node-1');
|
||||
});
|
||||
await protocol1.sendMessage('node-2', 'request', { test: 'data' });
|
||||
await TestUtils.sleep(100);
|
||||
expect(receivedMessage).toBe(true);
|
||||
});
|
||||
test('should handle request-response pattern', async () => {
|
||||
protocol2.on('request:received', async (message) => {
|
||||
await protocol2.sendResponse(message.id, message.from, {
|
||||
status: 'ok',
|
||||
data: 'response',
|
||||
});
|
||||
});
|
||||
const response = await protocol1.sendMessage('node-2', 'request', { query: 'test' }, { expectResponse: true });
|
||||
expect(response.status).toBe('ok');
|
||||
});
|
||||
test('should broadcast messages to all nodes', async () => {
|
||||
let received = false;
|
||||
protocol2.on('broadcast:received', (message) => {
|
||||
received = true;
|
||||
expect(message.type).toBe('broadcast');
|
||||
});
|
||||
await protocol1.broadcastMessage('broadcast', { event: 'test' });
|
||||
await TestUtils.sleep(100);
|
||||
expect(received).toBe(true);
|
||||
});
|
||||
test('should handle consensus proposals', async () => {
|
||||
// Node 2 auto-approves proposals
|
||||
protocol2.on('consensus:proposed', async (proposal) => {
|
||||
// Auto-approve handled internally in test setup
|
||||
});
|
||||
const approved = await protocol1.proposeConsensus('schema_change', { change: 'add_field' }, 1 // Only need 1 vote (from proposer)
|
||||
);
|
||||
expect(approved).toBe(true);
|
||||
});
|
||||
test('should handle pub/sub topics', async () => {
|
||||
let receivedMessage = false;
|
||||
// Subscribe node 2 to 'sync' topic
|
||||
protocol2.subscribe('sync', 'node-2');
|
||||
protocol2.on('topic:message', (data) => {
|
||||
if (data.topicName === 'sync') {
|
||||
receivedMessage = true;
|
||||
expect(data.message.payload.data).toBe('sync-data');
|
||||
}
|
||||
});
|
||||
// Publish to topic
|
||||
await protocol1.publishToTopic('sync', { data: 'sync-data' });
|
||||
await TestUtils.sleep(100);
|
||||
expect(receivedMessage).toBe(true);
|
||||
});
|
||||
test('should detect unhealthy nodes', async () => {
|
||||
let unhealthyDetected = false;
|
||||
protocol1.on('node:unhealthy', (data) => {
|
||||
unhealthyDetected = true;
|
||||
expect(data.nodeId).toBe('node-2');
|
||||
});
|
||||
// Stop node 2 heartbeat
|
||||
await protocol2.shutdown();
|
||||
// Wait for health check to detect
|
||||
await TestUtils.sleep(7000);
|
||||
expect(unhealthyDetected).toBe(true);
|
||||
});
|
||||
});
|
||||
/**
|
||||
* Test Suite 5: Performance Benchmarks
|
||||
*/
|
||||
describe('Performance Benchmarks', () => {
|
||||
test('should handle high query throughput', async () => {
|
||||
const config = {
|
||||
agentId: 'perf-agent',
|
||||
region: 'us-east',
|
||||
coordinatorEndpoint: 'coordinator.ruvector.io',
|
||||
localStoragePath: '/tmp/perf-agent',
|
||||
maxConcurrentStreams: 1000,
|
||||
metricsReportInterval: 30000,
|
||||
syncInterval: 5000,
|
||||
enableClaudeFlowHooks: false,
|
||||
vectorDimensions: 768,
|
||||
capabilities: ['query'],
|
||||
};
|
||||
const agent = new regional_agent_1.RegionalAgent(config);
|
||||
// Index vectors
|
||||
const vectors = Array.from({ length: 10000 }, (_, i) => ({
|
||||
id: `vec-${i}`,
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { index: i },
|
||||
}));
|
||||
await agent.indexVectors(vectors);
|
||||
// Run queries
|
||||
const queryCount = 1000;
|
||||
const queries = [];
|
||||
const startTime = Date.now();
|
||||
for (let i = 0; i < queryCount; i++) {
|
||||
queries.push(agent.processQuery({
|
||||
id: `perf-query-${i}`,
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 10,
|
||||
timeout: 5000,
|
||||
}).catch(() => null) // Ignore rate limit errors
|
||||
);
|
||||
}
|
||||
const results = await Promise.all(queries);
|
||||
const successfulQueries = results.filter(r => r !== null);
|
||||
const totalTime = Date.now() - startTime;
|
||||
const qps = (successfulQueries.length / totalTime) * 1000;
|
||||
console.log(`\nPerformance Benchmark:`);
|
||||
console.log(`Total queries: ${queryCount}`);
|
||||
console.log(`Successful: ${successfulQueries.length}`);
|
||||
console.log(`Time: ${totalTime}ms`);
|
||||
console.log(`QPS: ${qps.toFixed(2)}`);
|
||||
expect(successfulQueries.length).toBeGreaterThan(0);
|
||||
expect(qps).toBeGreaterThan(1); // At least 1 QPS
|
||||
await agent.shutdown();
|
||||
});
|
||||
test('should scale agents based on load', async () => {
|
||||
const coordinatorConfig = {
|
||||
maxAgentsPerRegion: 10,
|
||||
healthCheckInterval: 5000,
|
||||
taskTimeout: 10000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 5000,
|
||||
loadBalancingStrategy: 'adaptive',
|
||||
failoverThreshold: 3,
|
||||
enableClaudeFlowHooks: false,
|
||||
};
|
||||
const coordinator = new agent_coordinator_1.AgentCoordinator(coordinatorConfig);
|
||||
const swarmConfig = {
|
||||
topology: 'mesh',
|
||||
minAgentsPerRegion: 1,
|
||||
maxAgentsPerRegion: 5,
|
||||
scaleUpThreshold: 70,
|
||||
scaleDownThreshold: 30,
|
||||
scaleUpCooldown: 1000, // Short cooldown for testing
|
||||
scaleDownCooldown: 2000,
|
||||
healthCheckInterval: 1000,
|
||||
enableAutoScaling: true,
|
||||
enableClaudeFlowHooks: false,
|
||||
regions: ['us-east'],
|
||||
};
|
||||
const swarmManager = new swarm_manager_1.SwarmManager(swarmConfig, coordinator);
|
||||
await TestUtils.sleep(1000);
|
||||
const initialCount = swarmManager.getStatus().totalAgents;
|
||||
// Spawn additional agents to simulate scale-up
|
||||
await swarmManager.spawnAgent('us-east');
|
||||
await swarmManager.spawnAgent('us-east');
|
||||
await TestUtils.sleep(500);
|
||||
const scaledCount = swarmManager.getStatus().totalAgents;
|
||||
expect(scaledCount).toBeGreaterThan(initialCount);
|
||||
await swarmManager.shutdown();
|
||||
await coordinator.shutdown();
|
||||
}, 15000);
|
||||
});
|
||||
/**
|
||||
* Test Suite 6: Failover Scenarios
|
||||
*/
|
||||
describe('Failover Scenarios', () => {
|
||||
test('should handle agent failure and task redistribution', async () => {
|
||||
const coordinatorConfig = {
|
||||
maxAgentsPerRegion: 10,
|
||||
healthCheckInterval: 1000,
|
||||
taskTimeout: 5000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 2000,
|
||||
loadBalancingStrategy: 'round-robin',
|
||||
failoverThreshold: 2,
|
||||
enableClaudeFlowHooks: false,
|
||||
};
|
||||
const coordinator = new agent_coordinator_1.AgentCoordinator(coordinatorConfig);
|
||||
// Register two agents
|
||||
await coordinator.registerAgent({
|
||||
agentId: 'agent-1',
|
||||
region: 'us-east',
|
||||
endpoint: 'https://us-east.ruvector.io/agent/agent-1',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
await coordinator.registerAgent({
|
||||
agentId: 'agent-2',
|
||||
region: 'us-east',
|
||||
endpoint: 'https://us-east.ruvector.io/agent/agent-2',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
// Submit tasks
|
||||
await coordinator.submitTask({
|
||||
type: 'query',
|
||||
payload: { query: 'test' },
|
||||
priority: 1,
|
||||
maxRetries: 3,
|
||||
});
|
||||
// Simulate agent-1 failure
|
||||
coordinator.updateAgentMetrics({
|
||||
agentId: 'agent-1',
|
||||
region: 'us-east',
|
||||
cpuUsage: 100,
|
||||
memoryUsage: 100,
|
||||
activeStreams: 1000,
|
||||
queryLatency: 10000,
|
||||
timestamp: Date.now(),
|
||||
healthy: false,
|
||||
});
|
||||
await TestUtils.sleep(2000);
|
||||
const status = coordinator.getStatus();
|
||||
expect(status.healthyAgents).toBe(1); // Only agent-2 healthy
|
||||
await coordinator.shutdown();
|
||||
});
|
||||
test('should handle network partition in coordination protocol', async () => {
|
||||
const protocol1 = new coordination_protocol_1.CoordinationProtocol({
|
||||
nodeId: 'node-1',
|
||||
heartbeatInterval: 1000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: [],
|
||||
});
|
||||
const protocol2 = new coordination_protocol_1.CoordinationProtocol({
|
||||
nodeId: 'node-2',
|
||||
heartbeatInterval: 1000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: [],
|
||||
});
|
||||
protocol1.registerNode('node-2');
|
||||
protocol2.registerNode('node-1');
|
||||
// Set up message forwarding
|
||||
let networkPartitioned = false;
|
||||
protocol1.on('message:transmit', (message) => {
|
||||
if (!networkPartitioned && message.to === 'node-2') {
|
||||
protocol2.receiveMessage(message);
|
||||
}
|
||||
});
|
||||
// Normal communication
|
||||
await protocol1.sendMessage('node-2', 'request', { test: 'data' });
|
||||
await TestUtils.sleep(100);
|
||||
// Simulate network partition
|
||||
networkPartitioned = true;
|
||||
let unhealthyDetected = false;
|
||||
protocol1.on('node:unhealthy', (data) => {
|
||||
if (data.nodeId === 'node-2') {
|
||||
unhealthyDetected = true;
|
||||
}
|
||||
});
|
||||
// Wait for health check to detect partition
|
||||
await TestUtils.sleep(4000);
|
||||
expect(unhealthyDetected).toBe(true);
|
||||
await protocol1.shutdown();
|
||||
await protocol2.shutdown();
|
||||
}, 10000);
|
||||
});
|
||||
console.log('\n=== Integration Tests ===');
|
||||
console.log('Run with: npm test');
|
||||
console.log('Tests include:');
|
||||
console.log(' - Agent Coordinator: Registration, load balancing, failover');
|
||||
console.log(' - Regional Agent: Query processing, indexing, rate limiting');
|
||||
console.log(' - Swarm Manager: Auto-scaling, health monitoring, metrics');
|
||||
console.log(' - Coordination Protocol: Messaging, consensus, pub/sub');
|
||||
console.log(' - Performance: High throughput, latency benchmarks');
|
||||
console.log(' - Failover: Agent failure, network partition, recovery');
|
||||
//# sourceMappingURL=integration-tests.js.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/integration-tests.js.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/integration-tests.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
826
vendor/ruvector/npm/packages/agentic-integration/integration-tests.ts
vendored
Normal file
826
vendor/ruvector/npm/packages/agentic-integration/integration-tests.ts
vendored
Normal file
@@ -0,0 +1,826 @@
|
||||
/**
|
||||
* Integration Tests - Comprehensive tests for agentic coordination
|
||||
*
|
||||
* Tests:
|
||||
* - Multi-agent coordination
|
||||
* - Failover scenarios
|
||||
* - Load distribution
|
||||
* - Performance benchmarks
|
||||
*/
|
||||
|
||||
import { AgentCoordinator, CoordinatorConfig } from './agent-coordinator';
|
||||
import { RegionalAgent, RegionalAgentConfig } from './regional-agent';
|
||||
import { SwarmManager, SwarmConfig } from './swarm-manager';
|
||||
import { CoordinationProtocol, CoordinationProtocolConfig } from './coordination-protocol';
|
||||
|
||||
/**
|
||||
* Test utilities
|
||||
*/
|
||||
class TestUtils {
|
||||
static async sleep(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
static generateRandomVector(dimensions: number): number[] {
|
||||
return Array.from({ length: dimensions }, () => Math.random());
|
||||
}
|
||||
|
||||
static async measureLatency<T>(fn: () => Promise<T>): Promise<{ result: T; latency: number }> {
|
||||
const start = Date.now();
|
||||
const result = await fn();
|
||||
const latency = Date.now() - start;
|
||||
return { result, latency };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test Suite 1: Agent Coordinator Tests
|
||||
*/
|
||||
// Test Suite: AgentCoordinator — covers registration, round-robin task
// distribution, unhealthy-agent handling, and the per-region capacity limit.
// Each test builds a fresh coordinator with claude-flow hooks disabled and
// shuts it down afterwards so timers don't leak between tests.
describe('AgentCoordinator', () => {
  let coordinator: AgentCoordinator;

  beforeEach(() => {
    const config: CoordinatorConfig = {
      maxAgentsPerRegion: 10,
      healthCheckInterval: 5000,
      taskTimeout: 10000,
      retryBackoffBase: 100,
      retryBackoffMax: 5000,
      loadBalancingStrategy: 'round-robin',
      failoverThreshold: 3,
      enableClaudeFlowHooks: false, // Disable for testing
    };

    coordinator = new AgentCoordinator(config);
  });

  afterEach(async () => {
    // Tear down so interval timers don't keep the test process alive.
    await coordinator.shutdown();
  });

  test('should register agents successfully', async () => {
    const registration = {
      agentId: 'test-agent-1',
      region: 'us-east',
      endpoint: 'https://us-east.ruvector.io/agent/test-agent-1',
      capabilities: ['query', 'index'],
      capacity: 1000,
      registeredAt: Date.now(),
    };

    await coordinator.registerAgent(registration);

    // Registration should be reflected in both the total and per-region counts.
    const status = coordinator.getStatus();
    expect(status.totalAgents).toBe(1);
    expect(status.regionDistribution['us-east']).toBe(1);
  });

  test('should distribute tasks using round-robin', async () => {
    // Register multiple agents
    for (let i = 0; i < 3; i++) {
      await coordinator.registerAgent({
        agentId: `agent-${i}`,
        region: 'us-east',
        endpoint: `https://us-east.ruvector.io/agent/agent-${i}`,
        capabilities: ['query'],
        capacity: 1000,
        registeredAt: Date.now(),
      });
    }

    // Submit tasks
    const taskIds: string[] = [];
    for (let i = 0; i < 6; i++) {
      const taskId = await coordinator.submitTask({
        type: 'query',
        payload: { query: `test-query-${i}` },
        priority: 1,
        maxRetries: 3,
      });
      taskIds.push(taskId);
    }

    expect(taskIds.length).toBe(6);

    // Give the coordinator a moment to start dispatching.
    await TestUtils.sleep(1000);

    // Tasks should be somewhere in the pipeline (queued or dispatched).
    const status = coordinator.getStatus();
    expect(status.queuedTasks + status.activeTasks).toBeGreaterThan(0);
  });

  test('should handle agent failures with circuit breaker', async () => {
    const registration = {
      agentId: 'failing-agent',
      region: 'us-west',
      endpoint: 'https://us-west.ruvector.io/agent/failing-agent',
      capabilities: ['query'],
      capacity: 1000,
      registeredAt: Date.now(),
    };

    await coordinator.registerAgent(registration);

    // Simulate agent going unhealthy
    coordinator.updateAgentMetrics({
      agentId: 'failing-agent',
      region: 'us-west',
      cpuUsage: 95,
      memoryUsage: 95,
      activeStreams: 1000,
      queryLatency: 5000,
      timestamp: Date.now(),
      healthy: false,
    });

    // The only registered agent reported unhealthy → healthy count drops to 0.
    const status = coordinator.getStatus();
    expect(status.healthyAgents).toBe(0);
  });

  test('should enforce max agents per region', async () => {
    // Dedicated coordinator with a deliberately low per-region cap of 2.
    const config: CoordinatorConfig = {
      maxAgentsPerRegion: 2,
      healthCheckInterval: 5000,
      taskTimeout: 10000,
      retryBackoffBase: 100,
      retryBackoffMax: 5000,
      loadBalancingStrategy: 'round-robin',
      failoverThreshold: 3,
      enableClaudeFlowHooks: false,
    };

    const limitedCoordinator = new AgentCoordinator(config);

    // Register agents
    await limitedCoordinator.registerAgent({
      agentId: 'agent-1',
      region: 'eu-west',
      endpoint: 'https://eu-west.ruvector.io/agent/agent-1',
      capabilities: ['query'],
      capacity: 1000,
      registeredAt: Date.now(),
    });

    await limitedCoordinator.registerAgent({
      agentId: 'agent-2',
      region: 'eu-west',
      endpoint: 'https://eu-west.ruvector.io/agent/agent-2',
      capabilities: ['query'],
      capacity: 1000,
      registeredAt: Date.now(),
    });

    // Third agent should fail
    await expect(
      limitedCoordinator.registerAgent({
        agentId: 'agent-3',
        region: 'eu-west',
        endpoint: 'https://eu-west.ruvector.io/agent/agent-3',
        capabilities: ['query'],
        capacity: 1000,
        registeredAt: Date.now(),
      })
    ).rejects.toThrow('has reached max agent capacity');

    await limitedCoordinator.shutdown();
  });
});
|
||||
|
||||
/**
|
||||
* Test Suite 2: Regional Agent Tests
|
||||
*/
|
||||
describe('RegionalAgent', () => {
|
||||
let agent: RegionalAgent;
|
||||
|
||||
beforeEach(() => {
|
||||
const config: RegionalAgentConfig = {
|
||||
agentId: 'test-agent-us-east-1',
|
||||
region: 'us-east',
|
||||
coordinatorEndpoint: 'coordinator.ruvector.io',
|
||||
localStoragePath: '/tmp/test-agent',
|
||||
maxConcurrentStreams: 100,
|
||||
metricsReportInterval: 5000,
|
||||
syncInterval: 2000,
|
||||
enableClaudeFlowHooks: false,
|
||||
vectorDimensions: 768,
|
||||
capabilities: ['query', 'index', 'sync'],
|
||||
};
|
||||
|
||||
agent = new RegionalAgent(config);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await agent.shutdown();
|
||||
});
|
||||
|
||||
test('should process query successfully', async () => {
|
||||
// Index some vectors
|
||||
await agent.indexVectors([
|
||||
{
|
||||
id: 'vec-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'test' },
|
||||
},
|
||||
{
|
||||
id: 'vec-2',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'test' },
|
||||
},
|
||||
]);
|
||||
|
||||
// Query
|
||||
const result = await agent.processQuery({
|
||||
id: 'query-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 2,
|
||||
timeout: 5000,
|
||||
});
|
||||
|
||||
expect(result.matches.length).toBeGreaterThan(0);
|
||||
expect(result.region).toBe('us-east');
|
||||
expect(result.latency).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
test('should validate query dimensions', async () => {
|
||||
await expect(
|
||||
agent.processQuery({
|
||||
id: 'query-invalid',
|
||||
vector: TestUtils.generateRandomVector(512), // Wrong dimension
|
||||
topK: 10,
|
||||
timeout: 5000,
|
||||
})
|
||||
).rejects.toThrow('Invalid vector dimensions');
|
||||
});
|
||||
|
||||
test('should apply filters in query', async () => {
|
||||
// Index vectors with different metadata
|
||||
await agent.indexVectors([
|
||||
{
|
||||
id: 'vec-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'A', type: 'test' },
|
||||
},
|
||||
{
|
||||
id: 'vec-2',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'B', type: 'test' },
|
||||
},
|
||||
{
|
||||
id: 'vec-3',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { category: 'A', type: 'prod' },
|
||||
},
|
||||
]);
|
||||
|
||||
// Query with filter
|
||||
const result = await agent.processQuery({
|
||||
id: 'query-filtered',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 10,
|
||||
filters: { category: 'A' },
|
||||
timeout: 5000,
|
||||
});
|
||||
|
||||
// Should only return vectors with category 'A'
|
||||
expect(result.matches.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
test('should enforce rate limiting', async () => {
|
||||
// Try to exceed max concurrent streams
|
||||
const promises: Promise<any>[] = [];
|
||||
|
||||
for (let i = 0; i < 150; i++) {
|
||||
promises.push(
|
||||
agent.processQuery({
|
||||
id: `query-${i}`,
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 5,
|
||||
timeout: 5000,
|
||||
}).catch(err => err)
|
||||
);
|
||||
}
|
||||
|
||||
const results = await Promise.all(promises);
|
||||
const rateLimitErrors = results.filter(r => r instanceof Error && r.message.includes('Rate limit'));
|
||||
|
||||
expect(rateLimitErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
test('should handle sync payloads from other regions', async () => {
|
||||
const syncPayload = {
|
||||
type: 'index' as const,
|
||||
data: [
|
||||
{
|
||||
id: 'sync-vec-1',
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { synced: true },
|
||||
},
|
||||
],
|
||||
timestamp: Date.now(),
|
||||
sourceRegion: 'us-west',
|
||||
};
|
||||
|
||||
await agent.handleSyncPayload(syncPayload);
|
||||
|
||||
const status = agent.getStatus();
|
||||
expect(status.indexSize).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
/**
|
||||
* Test Suite 3: Swarm Manager Tests
|
||||
*/
|
||||
describe('SwarmManager', () => {
|
||||
let coordinator: AgentCoordinator;
|
||||
let swarmManager: SwarmManager;
|
||||
|
||||
beforeEach(() => {
|
||||
const coordinatorConfig: CoordinatorConfig = {
|
||||
maxAgentsPerRegion: 10,
|
||||
healthCheckInterval: 5000,
|
||||
taskTimeout: 10000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 5000,
|
||||
loadBalancingStrategy: 'adaptive',
|
||||
failoverThreshold: 3,
|
||||
enableClaudeFlowHooks: false,
|
||||
};
|
||||
|
||||
coordinator = new AgentCoordinator(coordinatorConfig);
|
||||
|
||||
const swarmConfig: SwarmConfig = {
|
||||
topology: 'mesh',
|
||||
minAgentsPerRegion: 1,
|
||||
maxAgentsPerRegion: 5,
|
||||
scaleUpThreshold: 80,
|
||||
scaleDownThreshold: 20,
|
||||
scaleUpCooldown: 30000,
|
||||
scaleDownCooldown: 60000,
|
||||
healthCheckInterval: 5000,
|
||||
enableAutoScaling: true,
|
||||
enableClaudeFlowHooks: false,
|
||||
regions: ['us-east', 'us-west', 'eu-west'],
|
||||
};
|
||||
|
||||
swarmManager = new SwarmManager(swarmConfig, coordinator);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await swarmManager.shutdown();
|
||||
await coordinator.shutdown();
|
||||
});
|
||||
|
||||
test('should spawn initial agents for all regions', async () => {
|
||||
await TestUtils.sleep(1000); // Wait for initialization
|
||||
|
||||
const status = swarmManager.getStatus();
|
||||
expect(status.totalAgents).toBeGreaterThanOrEqual(3); // At least 1 per region
|
||||
expect(Object.keys(status.metrics.regionMetrics).length).toBe(3);
|
||||
});
|
||||
|
||||
test('should spawn additional agents in specific region', async () => {
|
||||
const initialStatus = swarmManager.getStatus();
|
||||
const initialCount = initialStatus.totalAgents;
|
||||
|
||||
await swarmManager.spawnAgent('us-east');
|
||||
|
||||
const newStatus = swarmManager.getStatus();
|
||||
expect(newStatus.totalAgents).toBe(initialCount + 1);
|
||||
});
|
||||
|
||||
test('should calculate swarm metrics correctly', async () => {
|
||||
await TestUtils.sleep(1000);
|
||||
|
||||
const metrics = swarmManager.calculateSwarmMetrics();
|
||||
|
||||
expect(metrics.totalAgents).toBeGreaterThan(0);
|
||||
expect(metrics.regionMetrics).toBeDefined();
|
||||
expect(Object.keys(metrics.regionMetrics).length).toBe(3);
|
||||
|
||||
for (const region of ['us-east', 'us-west', 'eu-west']) {
|
||||
expect(metrics.regionMetrics[region]).toBeDefined();
|
||||
expect(metrics.regionMetrics[region].agentCount).toBeGreaterThan(0);
|
||||
}
|
||||
});
|
||||
|
||||
test('should despawn agent and redistribute tasks', async () => {
|
||||
await TestUtils.sleep(1000);
|
||||
|
||||
const status = swarmManager.getStatus();
|
||||
const agentIds = Object.keys(status.metrics.regionMetrics);
|
||||
|
||||
if (agentIds.length > 0) {
|
||||
const initialCount = status.totalAgents;
|
||||
|
||||
// Get first agent ID from any region
|
||||
const regionMetrics = Object.values(status.metrics.regionMetrics);
|
||||
const firstRegion = regionMetrics[0];
|
||||
|
||||
// We'll need to track spawned agents to despawn them
|
||||
// For now, just verify the mechanism works
|
||||
expect(initialCount).toBeGreaterThan(0);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
/**
|
||||
* Test Suite 4: Coordination Protocol Tests
|
||||
*/
|
||||
describe('CoordinationProtocol', () => {
|
||||
let protocol1: CoordinationProtocol;
|
||||
let protocol2: CoordinationProtocol;
|
||||
|
||||
beforeEach(() => {
|
||||
const config1: CoordinationProtocolConfig = {
|
||||
nodeId: 'node-1',
|
||||
heartbeatInterval: 2000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: ['sync', 'metrics', 'alerts'],
|
||||
};
|
||||
|
||||
const config2: CoordinationProtocolConfig = {
|
||||
nodeId: 'node-2',
|
||||
heartbeatInterval: 2000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: ['sync', 'metrics', 'alerts'],
|
||||
};
|
||||
|
||||
protocol1 = new CoordinationProtocol(config1);
|
||||
protocol2 = new CoordinationProtocol(config2);
|
||||
|
||||
// Connect protocols
|
||||
protocol1.registerNode('node-2');
|
||||
protocol2.registerNode('node-1');
|
||||
|
||||
// Set up message forwarding
|
||||
protocol1.on('message:transmit', (message) => {
|
||||
if (message.to === 'node-2' || !message.to) {
|
||||
protocol2.receiveMessage(message);
|
||||
}
|
||||
});
|
||||
|
||||
protocol2.on('message:transmit', (message) => {
|
||||
if (message.to === 'node-1' || !message.to) {
|
||||
protocol1.receiveMessage(message);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await protocol1.shutdown();
|
||||
await protocol2.shutdown();
|
||||
});
|
||||
|
||||
test('should send and receive messages between nodes', async () => {
|
||||
let receivedMessage = false;
|
||||
|
||||
protocol2.on('request:received', (message) => {
|
||||
receivedMessage = true;
|
||||
expect(message.from).toBe('node-1');
|
||||
});
|
||||
|
||||
await protocol1.sendMessage('node-2', 'request', { test: 'data' });
|
||||
|
||||
await TestUtils.sleep(100);
|
||||
|
||||
expect(receivedMessage).toBe(true);
|
||||
});
|
||||
|
||||
test('should handle request-response pattern', async () => {
|
||||
protocol2.on('request:received', async (message) => {
|
||||
await protocol2.sendResponse(message.id, message.from, {
|
||||
status: 'ok',
|
||||
data: 'response',
|
||||
});
|
||||
});
|
||||
|
||||
const response = await protocol1.sendMessage(
|
||||
'node-2',
|
||||
'request',
|
||||
{ query: 'test' },
|
||||
{ expectResponse: true }
|
||||
);
|
||||
|
||||
expect(response.status).toBe('ok');
|
||||
});
|
||||
|
||||
test('should broadcast messages to all nodes', async () => {
|
||||
let received = false;
|
||||
|
||||
protocol2.on('broadcast:received', (message) => {
|
||||
received = true;
|
||||
expect(message.type).toBe('broadcast');
|
||||
});
|
||||
|
||||
await protocol1.broadcastMessage('broadcast', { event: 'test' });
|
||||
|
||||
await TestUtils.sleep(100);
|
||||
|
||||
expect(received).toBe(true);
|
||||
});
|
||||
|
||||
test('should handle consensus proposals', async () => {
|
||||
// Node 2 auto-approves proposals
|
||||
protocol2.on('consensus:proposed', async (proposal) => {
|
||||
// Auto-approve handled internally in test setup
|
||||
});
|
||||
|
||||
const approved = await protocol1.proposeConsensus(
|
||||
'schema_change',
|
||||
{ change: 'add_field' },
|
||||
1 // Only need 1 vote (from proposer)
|
||||
);
|
||||
|
||||
expect(approved).toBe(true);
|
||||
});
|
||||
|
||||
test('should handle pub/sub topics', async () => {
|
||||
let receivedMessage = false;
|
||||
|
||||
// Subscribe node 2 to 'sync' topic
|
||||
protocol2.subscribe('sync', 'node-2');
|
||||
|
||||
protocol2.on('topic:message', (data) => {
|
||||
if (data.topicName === 'sync') {
|
||||
receivedMessage = true;
|
||||
expect(data.message.payload.data).toBe('sync-data');
|
||||
}
|
||||
});
|
||||
|
||||
// Publish to topic
|
||||
await protocol1.publishToTopic('sync', { data: 'sync-data' });
|
||||
|
||||
await TestUtils.sleep(100);
|
||||
|
||||
expect(receivedMessage).toBe(true);
|
||||
});
|
||||
|
||||
test('should detect unhealthy nodes', async () => {
|
||||
let unhealthyDetected = false;
|
||||
|
||||
protocol1.on('node:unhealthy', (data) => {
|
||||
unhealthyDetected = true;
|
||||
expect(data.nodeId).toBe('node-2');
|
||||
});
|
||||
|
||||
// Stop node 2 heartbeat
|
||||
await protocol2.shutdown();
|
||||
|
||||
// Wait for health check to detect
|
||||
await TestUtils.sleep(7000);
|
||||
|
||||
expect(unhealthyDetected).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
/**
|
||||
* Test Suite 5: Performance Benchmarks
|
||||
*/
|
||||
describe('Performance Benchmarks', () => {
|
||||
test('should handle high query throughput', async () => {
|
||||
const config: RegionalAgentConfig = {
|
||||
agentId: 'perf-agent',
|
||||
region: 'us-east',
|
||||
coordinatorEndpoint: 'coordinator.ruvector.io',
|
||||
localStoragePath: '/tmp/perf-agent',
|
||||
maxConcurrentStreams: 1000,
|
||||
metricsReportInterval: 30000,
|
||||
syncInterval: 5000,
|
||||
enableClaudeFlowHooks: false,
|
||||
vectorDimensions: 768,
|
||||
capabilities: ['query'],
|
||||
};
|
||||
|
||||
const agent = new RegionalAgent(config);
|
||||
|
||||
// Index vectors
|
||||
const vectors = Array.from({ length: 10000 }, (_, i) => ({
|
||||
id: `vec-${i}`,
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
metadata: { index: i },
|
||||
}));
|
||||
|
||||
await agent.indexVectors(vectors);
|
||||
|
||||
// Run queries
|
||||
const queryCount = 1000;
|
||||
const queries: Promise<any>[] = [];
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
for (let i = 0; i < queryCount; i++) {
|
||||
queries.push(
|
||||
agent.processQuery({
|
||||
id: `perf-query-${i}`,
|
||||
vector: TestUtils.generateRandomVector(768),
|
||||
topK: 10,
|
||||
timeout: 5000,
|
||||
}).catch(() => null) // Ignore rate limit errors
|
||||
);
|
||||
}
|
||||
|
||||
const results = await Promise.all(queries);
|
||||
const successfulQueries = results.filter(r => r !== null);
|
||||
|
||||
const totalTime = Date.now() - startTime;
|
||||
const qps = (successfulQueries.length / totalTime) * 1000;
|
||||
|
||||
console.log(`\nPerformance Benchmark:`);
|
||||
console.log(`Total queries: ${queryCount}`);
|
||||
console.log(`Successful: ${successfulQueries.length}`);
|
||||
console.log(`Time: ${totalTime}ms`);
|
||||
console.log(`QPS: ${qps.toFixed(2)}`);
|
||||
|
||||
expect(successfulQueries.length).toBeGreaterThan(0);
|
||||
expect(qps).toBeGreaterThan(1); // At least 1 QPS
|
||||
|
||||
await agent.shutdown();
|
||||
});
|
||||
|
||||
test('should scale agents based on load', async () => {
|
||||
const coordinatorConfig: CoordinatorConfig = {
|
||||
maxAgentsPerRegion: 10,
|
||||
healthCheckInterval: 5000,
|
||||
taskTimeout: 10000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 5000,
|
||||
loadBalancingStrategy: 'adaptive',
|
||||
failoverThreshold: 3,
|
||||
enableClaudeFlowHooks: false,
|
||||
};
|
||||
|
||||
const coordinator = new AgentCoordinator(coordinatorConfig);
|
||||
|
||||
const swarmConfig: SwarmConfig = {
|
||||
topology: 'mesh',
|
||||
minAgentsPerRegion: 1,
|
||||
maxAgentsPerRegion: 5,
|
||||
scaleUpThreshold: 70,
|
||||
scaleDownThreshold: 30,
|
||||
scaleUpCooldown: 1000, // Short cooldown for testing
|
||||
scaleDownCooldown: 2000,
|
||||
healthCheckInterval: 1000,
|
||||
enableAutoScaling: true,
|
||||
enableClaudeFlowHooks: false,
|
||||
regions: ['us-east'],
|
||||
};
|
||||
|
||||
const swarmManager = new SwarmManager(swarmConfig, coordinator);
|
||||
|
||||
await TestUtils.sleep(1000);
|
||||
|
||||
const initialCount = swarmManager.getStatus().totalAgents;
|
||||
|
||||
// Spawn additional agents to simulate scale-up
|
||||
await swarmManager.spawnAgent('us-east');
|
||||
await swarmManager.spawnAgent('us-east');
|
||||
|
||||
await TestUtils.sleep(500);
|
||||
|
||||
const scaledCount = swarmManager.getStatus().totalAgents;
|
||||
|
||||
expect(scaledCount).toBeGreaterThan(initialCount);
|
||||
|
||||
await swarmManager.shutdown();
|
||||
await coordinator.shutdown();
|
||||
}, 15000);
|
||||
});
|
||||
|
||||
/**
|
||||
* Test Suite 6: Failover Scenarios
|
||||
*/
|
||||
describe('Failover Scenarios', () => {
|
||||
test('should handle agent failure and task redistribution', async () => {
|
||||
const coordinatorConfig: CoordinatorConfig = {
|
||||
maxAgentsPerRegion: 10,
|
||||
healthCheckInterval: 1000,
|
||||
taskTimeout: 5000,
|
||||
retryBackoffBase: 100,
|
||||
retryBackoffMax: 2000,
|
||||
loadBalancingStrategy: 'round-robin',
|
||||
failoverThreshold: 2,
|
||||
enableClaudeFlowHooks: false,
|
||||
};
|
||||
|
||||
const coordinator = new AgentCoordinator(coordinatorConfig);
|
||||
|
||||
// Register two agents
|
||||
await coordinator.registerAgent({
|
||||
agentId: 'agent-1',
|
||||
region: 'us-east',
|
||||
endpoint: 'https://us-east.ruvector.io/agent/agent-1',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
|
||||
await coordinator.registerAgent({
|
||||
agentId: 'agent-2',
|
||||
region: 'us-east',
|
||||
endpoint: 'https://us-east.ruvector.io/agent/agent-2',
|
||||
capabilities: ['query'],
|
||||
capacity: 1000,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
|
||||
// Submit tasks
|
||||
await coordinator.submitTask({
|
||||
type: 'query',
|
||||
payload: { query: 'test' },
|
||||
priority: 1,
|
||||
maxRetries: 3,
|
||||
});
|
||||
|
||||
// Simulate agent-1 failure
|
||||
coordinator.updateAgentMetrics({
|
||||
agentId: 'agent-1',
|
||||
region: 'us-east',
|
||||
cpuUsage: 100,
|
||||
memoryUsage: 100,
|
||||
activeStreams: 1000,
|
||||
queryLatency: 10000,
|
||||
timestamp: Date.now(),
|
||||
healthy: false,
|
||||
});
|
||||
|
||||
await TestUtils.sleep(2000);
|
||||
|
||||
const status = coordinator.getStatus();
|
||||
expect(status.healthyAgents).toBe(1); // Only agent-2 healthy
|
||||
|
||||
await coordinator.shutdown();
|
||||
});
|
||||
|
||||
test('should handle network partition in coordination protocol', async () => {
|
||||
const protocol1 = new CoordinationProtocol({
|
||||
nodeId: 'node-1',
|
||||
heartbeatInterval: 1000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: [],
|
||||
});
|
||||
|
||||
const protocol2 = new CoordinationProtocol({
|
||||
nodeId: 'node-2',
|
||||
heartbeatInterval: 1000,
|
||||
messageTimeout: 5000,
|
||||
consensusTimeout: 10000,
|
||||
maxMessageQueueSize: 1000,
|
||||
enableClaudeFlowHooks: false,
|
||||
pubSubTopics: [],
|
||||
});
|
||||
|
||||
protocol1.registerNode('node-2');
|
||||
protocol2.registerNode('node-1');
|
||||
|
||||
// Set up message forwarding
|
||||
let networkPartitioned = false;
|
||||
|
||||
protocol1.on('message:transmit', (message) => {
|
||||
if (!networkPartitioned && message.to === 'node-2') {
|
||||
protocol2.receiveMessage(message);
|
||||
}
|
||||
});
|
||||
|
||||
// Normal communication
|
||||
await protocol1.sendMessage('node-2', 'request', { test: 'data' });
|
||||
|
||||
await TestUtils.sleep(100);
|
||||
|
||||
// Simulate network partition
|
||||
networkPartitioned = true;
|
||||
|
||||
let unhealthyDetected = false;
|
||||
|
||||
protocol1.on('node:unhealthy', (data) => {
|
||||
if (data.nodeId === 'node-2') {
|
||||
unhealthyDetected = true;
|
||||
}
|
||||
});
|
||||
|
||||
// Wait for health check to detect partition
|
||||
await TestUtils.sleep(4000);
|
||||
|
||||
expect(unhealthyDetected).toBe(true);
|
||||
|
||||
await protocol1.shutdown();
|
||||
await protocol2.shutdown();
|
||||
}, 10000);
|
||||
});
|
||||
|
||||
console.log('\n=== Integration Tests ===');
|
||||
console.log('Run with: npm test');
|
||||
console.log('Tests include:');
|
||||
console.log(' - Agent Coordinator: Registration, load balancing, failover');
|
||||
console.log(' - Regional Agent: Query processing, indexing, rate limiting');
|
||||
console.log(' - Swarm Manager: Auto-scaling, health monitoring, metrics');
|
||||
console.log(' - Coordination Protocol: Messaging, consensus, pub/sub');
|
||||
console.log(' - Performance: High throughput, latency benchmarks');
|
||||
console.log(' - Failover: Agent failure, network partition, recovery');
|
||||
133
vendor/ruvector/npm/packages/agentic-integration/package.json
vendored
Normal file
133
vendor/ruvector/npm/packages/agentic-integration/package.json
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
{
|
||||
"name": "@ruvector/agentic-integration",
|
||||
"version": "1.0.0",
|
||||
"description": "Distributed agent coordination for ruvector with claude-flow integration",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"test": "jest --coverage",
|
||||
"test:watch": "jest --watch",
|
||||
"test:integration": "jest --testPathPattern=integration-tests",
|
||||
"lint": "eslint src/**/*.ts",
|
||||
"format": "prettier --write src/**/*.ts",
|
||||
"typecheck": "tsc --noEmit",
|
||||
"deploy:us-east": "npm run build && gcloud run deploy ruvector-agent-us-east --source .",
|
||||
"deploy:us-west": "npm run build && gcloud run deploy ruvector-agent-us-west --source .",
|
||||
"deploy:eu-west": "npm run build && gcloud run deploy ruvector-agent-eu-west --source .",
|
||||
"deploy:asia-east": "npm run build && gcloud run deploy ruvector-agent-asia-east --source .",
|
||||
"deploy:all": "npm run deploy:us-east && npm run deploy:us-west && npm run deploy:eu-west && npm run deploy:asia-east",
|
||||
"benchmark": "node dist/benchmarks/performance.js",
|
||||
"monitor": "node dist/tools/monitor.js",
|
||||
"swarm:init": "npx claude-flow@alpha hooks pre-task --description 'Initialize swarm'",
|
||||
"swarm:status": "node dist/tools/swarm-status.js"
|
||||
},
|
||||
"keywords": [
|
||||
"ruvector",
|
||||
"distributed-systems",
|
||||
"agent-coordination",
|
||||
"vector-search",
|
||||
"claude-flow",
|
||||
"swarm",
|
||||
"mesh-coordination"
|
||||
],
|
||||
"author": "RuVector Team",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"claude-flow": "^2.0.0",
|
||||
"events": "^3.3.0",
|
||||
"winston": "^3.11.0",
|
||||
"pino": "^8.17.0",
|
||||
"dotenv": "^16.3.1",
|
||||
"@google-cloud/pubsub": "^4.0.7",
|
||||
"@google-cloud/storage": "^7.7.0",
|
||||
"@grpc/grpc-js": "^1.9.13",
|
||||
"@grpc/proto-loader": "^0.7.10",
|
||||
"axios": "^1.6.2",
|
||||
"express": "^4.18.2",
|
||||
"fastify": "^4.25.2",
|
||||
"ioredis": "^5.3.2",
|
||||
"pg": "^8.11.3",
|
||||
"uuid": "^9.0.1",
|
||||
"zod": "^3.22.4"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.10.6",
|
||||
"@types/jest": "^29.5.11",
|
||||
"@types/express": "^4.17.21",
|
||||
"@typescript-eslint/eslint-plugin": "^6.16.0",
|
||||
"@typescript-eslint/parser": "^6.16.0",
|
||||
"eslint": "^8.56.0",
|
||||
"eslint-config-prettier": "^9.1.0",
|
||||
"jest": "^29.7.0",
|
||||
"ts-jest": "^29.1.1",
|
||||
"ts-node": "^10.9.2",
|
||||
"typescript": "^5.3.3",
|
||||
"prettier": "^3.1.1",
|
||||
"nodemon": "^3.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0",
|
||||
"npm": ">=9.0.0"
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"import": "./dist/index.js",
|
||||
"require": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
},
|
||||
"./coordinator": {
|
||||
"import": "./dist/agent-coordinator.js",
|
||||
"require": "./dist/agent-coordinator.js",
|
||||
"types": "./dist/agent-coordinator.d.ts"
|
||||
},
|
||||
"./agent": {
|
||||
"import": "./dist/regional-agent.js",
|
||||
"require": "./dist/regional-agent.js",
|
||||
"types": "./dist/regional-agent.d.ts"
|
||||
},
|
||||
"./swarm": {
|
||||
"import": "./dist/swarm-manager.js",
|
||||
"require": "./dist/swarm-manager.js",
|
||||
"types": "./dist/swarm-manager.d.ts"
|
||||
},
|
||||
"./protocol": {
|
||||
"import": "./dist/coordination-protocol.js",
|
||||
"require": "./dist/coordination-protocol.js",
|
||||
"types": "./dist/coordination-protocol.d.ts"
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"README.md",
|
||||
"LICENSE"
|
||||
],
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/ruvnet/ruvector.git",
|
||||
"directory": "src/agentic-integration"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/ruvnet/ruvector/issues"
|
||||
},
|
||||
"homepage": "https://github.com/ruvnet/ruvector#readme",
|
||||
"jest": {
|
||||
"preset": "ts-jest",
|
||||
"testEnvironment": "node",
|
||||
"coverageDirectory": "coverage",
|
||||
"collectCoverageFrom": [
|
||||
"src/**/*.ts",
|
||||
"!src/**/*.test.ts",
|
||||
"!src/**/*.spec.ts"
|
||||
],
|
||||
"testMatch": [
|
||||
"**/__tests__/**/*.ts",
|
||||
"**/?(*.)+(spec|test).ts"
|
||||
],
|
||||
"moduleFileExtensions": [
|
||||
"ts",
|
||||
"js",
|
||||
"json"
|
||||
]
|
||||
}
|
||||
}
|
||||
155
vendor/ruvector/npm/packages/agentic-integration/regional-agent.d.ts
vendored
Normal file
155
vendor/ruvector/npm/packages/agentic-integration/regional-agent.d.ts
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
/**
|
||||
* Regional Agent - Per-region agent implementation for distributed processing
|
||||
*
|
||||
* Handles:
|
||||
* - Region-specific initialization
|
||||
* - Local query processing
|
||||
* - Cross-region communication
|
||||
* - State synchronization
|
||||
* - Metrics reporting
|
||||
*/
|
||||
import { EventEmitter } from 'events';
|
||||
export interface RegionalAgentConfig {
|
||||
agentId: string;
|
||||
region: string;
|
||||
coordinatorEndpoint: string;
|
||||
localStoragePath: string;
|
||||
maxConcurrentStreams: number;
|
||||
metricsReportInterval: number;
|
||||
syncInterval: number;
|
||||
enableClaudeFlowHooks: boolean;
|
||||
vectorDimensions: number;
|
||||
capabilities: string[];
|
||||
}
|
||||
export interface QueryRequest {
|
||||
id: string;
|
||||
vector: number[];
|
||||
topK: number;
|
||||
filters?: Record<string, any>;
|
||||
timeout: number;
|
||||
}
|
||||
export interface QueryResult {
|
||||
id: string;
|
||||
matches: Array<{
|
||||
id: string;
|
||||
score: number;
|
||||
metadata: Record<string, any>;
|
||||
}>;
|
||||
latency: number;
|
||||
region: string;
|
||||
}
|
||||
export interface SyncPayload {
|
||||
type: 'index' | 'update' | 'delete';
|
||||
data: any;
|
||||
timestamp: number;
|
||||
sourceRegion: string;
|
||||
}
|
||||
export declare class RegionalAgent extends EventEmitter {
|
||||
private config;
|
||||
private activeStreams;
|
||||
private totalQueries;
|
||||
private totalLatency;
|
||||
private metricsTimer?;
|
||||
private syncTimer?;
|
||||
private localIndex;
|
||||
private syncQueue;
|
||||
private rateLimiter;
|
||||
constructor(config: RegionalAgentConfig);
|
||||
/**
|
||||
* Initialize regional agent
|
||||
*/
|
||||
private initialize;
|
||||
/**
|
||||
* Load local index from persistent storage
|
||||
*/
|
||||
private loadLocalIndex;
|
||||
/**
|
||||
* Register with coordinator
|
||||
*/
|
||||
private registerWithCoordinator;
|
||||
/**
|
||||
* Process query request locally
|
||||
*/
|
||||
processQuery(request: QueryRequest): Promise<QueryResult>;
|
||||
/**
|
||||
* Validate query request
|
||||
*/
|
||||
private validateQuery;
|
||||
/**
|
||||
* Search vectors in local index
|
||||
*/
|
||||
private searchVectors;
|
||||
/**
|
||||
* Calculate cosine similarity between vectors
|
||||
*/
|
||||
private calculateSimilarity;
|
||||
/**
|
||||
* Check if metadata matches filters
|
||||
*/
|
||||
private matchesFilters;
|
||||
/**
|
||||
* Add/update vectors in local index
|
||||
*/
|
||||
indexVectors(vectors: Array<{
|
||||
id: string;
|
||||
vector: number[];
|
||||
metadata?: Record<string, any>;
|
||||
}>): Promise<void>;
|
||||
/**
|
||||
* Delete vectors from local index
|
||||
*/
|
||||
deleteVectors(ids: string[]): Promise<void>;
|
||||
/**
|
||||
* Handle sync payload from other regions
|
||||
*/
|
||||
handleSyncPayload(payload: SyncPayload): Promise<void>;
|
||||
/**
|
||||
* Start metrics reporting loop
|
||||
*/
|
||||
private startMetricsReporting;
|
||||
/**
|
||||
* Report metrics to coordinator
|
||||
*/
|
||||
private reportMetrics;
|
||||
/**
|
||||
* Get CPU usage (placeholder)
|
||||
*/
|
||||
private getCpuUsage;
|
||||
/**
|
||||
* Get memory usage (placeholder)
|
||||
*/
|
||||
private getMemoryUsage;
|
||||
/**
|
||||
* Check if agent is healthy
|
||||
*/
|
||||
private isHealthy;
|
||||
/**
|
||||
* Start sync process loop
|
||||
*/
|
||||
private startSyncProcess;
|
||||
/**
|
||||
* Process sync queue (send to other regions)
|
||||
*/
|
||||
private processSyncQueue;
|
||||
/**
|
||||
* Get agent status
|
||||
*/
|
||||
getStatus(): {
|
||||
agentId: string;
|
||||
region: string;
|
||||
healthy: boolean;
|
||||
activeStreams: number;
|
||||
indexSize: number;
|
||||
syncQueueSize: number;
|
||||
avgQueryLatency: number;
|
||||
};
|
||||
/**
|
||||
* Shutdown agent gracefully
|
||||
*/
|
||||
shutdown(): Promise<void>;
|
||||
/**
|
||||
* Save local index to persistent storage
|
||||
*/
|
||||
private saveLocalIndex;
|
||||
}
|
||||
//# sourceMappingURL=regional-agent.d.ts.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/regional-agent.d.ts.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/regional-agent.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"regional-agent.d.ts","sourceRoot":"","sources":["regional-agent.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG;AAEH,OAAO,EAAE,YAAY,EAAE,MAAM,QAAQ,CAAC;AAMtC,MAAM,WAAW,mBAAmB;IAClC,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,MAAM,CAAC;IACf,mBAAmB,EAAE,MAAM,CAAC;IAC5B,gBAAgB,EAAE,MAAM,CAAC;IACzB,oBAAoB,EAAE,MAAM,CAAC;IAC7B,qBAAqB,EAAE,MAAM,CAAC;IAC9B,YAAY,EAAE,MAAM,CAAC;IACrB,qBAAqB,EAAE,OAAO,CAAC;IAC/B,gBAAgB,EAAE,MAAM,CAAC;IACzB,YAAY,EAAE,MAAM,EAAE,CAAC;CACxB;AAED,MAAM,WAAW,YAAY;IAC3B,EAAE,EAAE,MAAM,CAAC;IACX,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC9B,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,WAAW;IAC1B,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,KAAK,CAAC;QACb,EAAE,EAAE,MAAM,CAAC;QACX,KAAK,EAAE,MAAM,CAAC;QACd,QAAQ,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;KAC/B,CAAC,CAAC;IACH,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,WAAW,WAAW;IAC1B,IAAI,EAAE,OAAO,GAAG,QAAQ,GAAG,QAAQ,CAAC;IACpC,IAAI,EAAE,GAAG,CAAC;IACV,SAAS,EAAE,MAAM,CAAC;IAClB,YAAY,EAAE,MAAM,CAAC;CACtB;AAED,qBAAa,aAAc,SAAQ,YAAY;IAUjC,OAAO,CAAC,MAAM;IAT1B,OAAO,CAAC,aAAa,CAAK;IAC1B,OAAO,CAAC,YAAY,CAAK;IACzB,OAAO,CAAC,YAAY,CAAK;IACzB,OAAO,CAAC,YAAY,CAAC,CAAiB;IACtC,OAAO,CAAC,SAAS,CAAC,CAAiB;IACnC,OAAO,CAAC,UAAU,CAA+B;IACjD,OAAO,CAAC,SAAS,CAAqB;IACtC,OAAO,CAAC,WAAW,CAAc;gBAEb,MAAM,EAAE,mBAAmB;IAS/C;;OAEG;YACW,UAAU;IAyCxB;;OAEG;YACW,cAAc;IAgB5B;;OAEG;YACW,uBAAuB;IAsBrC;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,YAAY,GAAG,OAAO,CAAC,WAAW,CAAC;IAiE/D;;OAEG;IACH,OAAO,CAAC,aAAa;IAYrB;;OAEG;YACW,aAAa;IA2B3B;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAc3B;;OAEG;IACH,OAAO,CAAC,cAAc;IAStB;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,KAAK,CAAC;QAAE,EAAE,EAAE,MAAM,CAAC;QAAC,MAAM,EAAE,MAAM,EAAE,CAAC;QAAC,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;KAAE,CAAC,GAAG,OAAO,CAAC,IAAI,CAAC;IA4BnH;;OAEG;IACG,aAAa,CAAC,GAAG,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC;IAkBjD;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC;IAuC5D;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAM7B;;OAEG;IACH,OAA
O,CAAC,aAAa;IAqBrB;;OAEG;IACH,OAAO,CAAC,WAAW;IAKnB;;OAEG;IACH,OAAO,CAAC,cAAc;IAMtB;;OAEG;IACH,OAAO,CAAC,SAAS;IAQjB;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAMxB;;OAEG;YACW,gBAAgB;IAY9B;;OAEG;IACH,SAAS,IAAI;QACX,OAAO,EAAE,MAAM,CAAC;QAChB,MAAM,EAAE,MAAM,CAAC;QACf,OAAO,EAAE,OAAO,CAAC;QACjB,aAAa,EAAE,MAAM,CAAC;QACtB,SAAS,EAAE,MAAM,CAAC;QAClB,aAAa,EAAE,MAAM,CAAC;QACtB,eAAe,EAAE,MAAM,CAAC;KACzB;IAYD;;OAEG;IACG,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;IAoC/B;;OAEG;YACW,cAAc;CAa7B"}
|
||||
456
vendor/ruvector/npm/packages/agentic-integration/regional-agent.js
vendored
Normal file
456
vendor/ruvector/npm/packages/agentic-integration/regional-agent.js
vendored
Normal file
@@ -0,0 +1,456 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Regional Agent - Per-region agent implementation for distributed processing
|
||||
*
|
||||
* Handles:
|
||||
* - Region-specific initialization
|
||||
* - Local query processing
|
||||
* - Cross-region communication
|
||||
* - State synchronization
|
||||
* - Metrics reporting
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.RegionalAgent = void 0;
|
||||
const events_1 = require("events");
|
||||
const child_process_1 = require("child_process");
|
||||
const util_1 = require("util");
|
||||
const execAsync = (0, util_1.promisify)(child_process_1.exec);
|
||||
class RegionalAgent extends events_1.EventEmitter {
|
||||
constructor(config) {
|
||||
super();
|
||||
this.config = config;
|
||||
this.activeStreams = 0;
|
||||
this.totalQueries = 0;
|
||||
this.totalLatency = 0;
|
||||
this.localIndex = new Map();
|
||||
this.syncQueue = [];
|
||||
this.rateLimiter = new RateLimiter({
|
||||
maxRequests: config.maxConcurrentStreams,
|
||||
windowMs: 1000,
|
||||
});
|
||||
this.initialize();
|
||||
}
|
||||
/**
|
||||
* Initialize regional agent
|
||||
*/
|
||||
async initialize() {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Initializing agent ${this.config.agentId}...`);
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Pre-task hook for agent initialization
|
||||
await execAsync(`npx claude-flow@alpha hooks pre-task --description "Initialize regional agent ${this.config.agentId} in ${this.config.region}"`);
|
||||
// Restore session if available
|
||||
await execAsync(`npx claude-flow@alpha hooks session-restore --session-id "agent-${this.config.agentId}"`);
|
||||
console.log(`[RegionalAgent:${this.config.region}] Claude-flow hooks initialized`);
|
||||
}
|
||||
catch (error) {
|
||||
console.warn(`[RegionalAgent:${this.config.region}] Claude-flow hooks not available:`, error);
|
||||
}
|
||||
}
|
||||
// Load local index from storage
|
||||
await this.loadLocalIndex();
|
||||
// Start metrics reporting
|
||||
this.startMetricsReporting();
|
||||
// Start sync process
|
||||
this.startSyncProcess();
|
||||
// Register with coordinator
|
||||
await this.registerWithCoordinator();
|
||||
this.emit('agent:initialized', {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
});
|
||||
console.log(`[RegionalAgent:${this.config.region}] Agent ${this.config.agentId} initialized successfully`);
|
||||
}
|
||||
/**
|
||||
* Load local index from persistent storage
|
||||
*/
|
||||
async loadLocalIndex() {
|
||||
try {
|
||||
// Placeholder for actual storage loading
|
||||
// In production, this would load from disk/database
|
||||
console.log(`[RegionalAgent:${this.config.region}] Loading local index from ${this.config.localStoragePath}`);
|
||||
// Simulate loading
|
||||
this.localIndex.clear();
|
||||
console.log(`[RegionalAgent:${this.config.region}] Local index loaded: ${this.localIndex.size} vectors`);
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error loading local index:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Register with coordinator
|
||||
*/
|
||||
async registerWithCoordinator() {
|
||||
try {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Registering with coordinator at ${this.config.coordinatorEndpoint}`);
|
||||
// In production, this would be an HTTP/gRPC call
|
||||
// For now, emit event
|
||||
this.emit('coordinator:register', {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
endpoint: `https://${this.config.region}.ruvector.io/agent/${this.config.agentId}`,
|
||||
capabilities: this.config.capabilities,
|
||||
capacity: this.config.maxConcurrentStreams,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
console.log(`[RegionalAgent:${this.config.region}] Successfully registered with coordinator`);
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Failed to register with coordinator:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Process query request locally
|
||||
*/
|
||||
async processQuery(request) {
|
||||
const startTime = Date.now();
|
||||
// Check rate limit
|
||||
if (!this.rateLimiter.tryAcquire()) {
|
||||
throw new Error('Rate limit exceeded');
|
||||
}
|
||||
this.activeStreams++;
|
||||
this.totalQueries++;
|
||||
try {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Processing query ${request.id}`);
|
||||
// Validate query
|
||||
this.validateQuery(request);
|
||||
// Execute vector search
|
||||
const matches = await this.searchVectors(request);
|
||||
const latency = Date.now() - startTime;
|
||||
this.totalLatency += latency;
|
||||
const result = {
|
||||
id: request.id,
|
||||
matches,
|
||||
latency,
|
||||
region: this.config.region,
|
||||
};
|
||||
this.emit('query:completed', {
|
||||
queryId: request.id,
|
||||
latency,
|
||||
matchCount: matches.length,
|
||||
});
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Notify about query completion
|
||||
await execAsync(`npx claude-flow@alpha hooks notify --message "Query ${request.id} completed in ${latency}ms with ${matches.length} matches"`);
|
||||
}
|
||||
catch (error) {
|
||||
// Non-critical error
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error processing query ${request.id}:`, error);
|
||||
this.emit('query:failed', {
|
||||
queryId: request.id,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
finally {
|
||||
this.activeStreams--;
|
||||
this.rateLimiter.release();
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Validate query request
|
||||
*/
|
||||
validateQuery(request) {
|
||||
if (!request.vector || request.vector.length !== this.config.vectorDimensions) {
|
||||
throw new Error(`Invalid vector dimensions: expected ${this.config.vectorDimensions}, got ${request.vector?.length || 0}`);
|
||||
}
|
||||
if (request.topK <= 0 || request.topK > 1000) {
|
||||
throw new Error(`Invalid topK value: ${request.topK} (must be between 1 and 1000)`);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Search vectors in local index
|
||||
*/
|
||||
async searchVectors(request) {
|
||||
// Placeholder for actual vector search
|
||||
// In production, this would use FAISS, Annoy, or similar library
|
||||
const matches = [];
|
||||
// Simulate vector search
|
||||
for (const [id, vector] of this.localIndex.entries()) {
|
||||
const score = this.calculateSimilarity(request.vector, vector);
|
||||
// Apply filters if present
|
||||
if (request.filters && !this.matchesFilters(vector.metadata, request.filters)) {
|
||||
continue;
|
||||
}
|
||||
matches.push({
|
||||
id,
|
||||
score,
|
||||
metadata: vector.metadata || {},
|
||||
});
|
||||
}
|
||||
// Sort by score and return top-k
|
||||
matches.sort((a, b) => b.score - a.score);
|
||||
return matches.slice(0, request.topK);
|
||||
}
|
||||
/**
|
||||
* Calculate cosine similarity between vectors
|
||||
*/
|
||||
calculateSimilarity(v1, v2) {
|
||||
let dotProduct = 0;
|
||||
let norm1 = 0;
|
||||
let norm2 = 0;
|
||||
for (let i = 0; i < v1.length; i++) {
|
||||
dotProduct += v1[i] * v2[i];
|
||||
norm1 += v1[i] * v1[i];
|
||||
norm2 += v2[i] * v2[i];
|
||||
}
|
||||
return dotProduct / (Math.sqrt(norm1) * Math.sqrt(norm2));
|
||||
}
|
||||
/**
|
||||
* Check if metadata matches filters
|
||||
*/
|
||||
matchesFilters(metadata, filters) {
|
||||
for (const [key, value] of Object.entries(filters)) {
|
||||
if (metadata[key] !== value) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
/**
|
||||
* Add/update vectors in local index
|
||||
*/
|
||||
async indexVectors(vectors) {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Indexing ${vectors.length} vectors`);
|
||||
for (const { id, vector, metadata } of vectors) {
|
||||
this.localIndex.set(id, { vector, metadata });
|
||||
}
|
||||
// Queue for cross-region sync
|
||||
this.syncQueue.push({
|
||||
type: 'index',
|
||||
data: vectors,
|
||||
timestamp: Date.now(),
|
||||
sourceRegion: this.config.region,
|
||||
});
|
||||
this.emit('vectors:indexed', { count: vectors.length });
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(`npx claude-flow@alpha hooks post-edit --file "local-index" --memory-key "swarm/${this.config.agentId}/index-update"`);
|
||||
}
|
||||
catch (error) {
|
||||
// Non-critical
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Delete vectors from local index
|
||||
*/
|
||||
async deleteVectors(ids) {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Deleting ${ids.length} vectors`);
|
||||
for (const id of ids) {
|
||||
this.localIndex.delete(id);
|
||||
}
|
||||
// Queue for cross-region sync
|
||||
this.syncQueue.push({
|
||||
type: 'delete',
|
||||
data: ids,
|
||||
timestamp: Date.now(),
|
||||
sourceRegion: this.config.region,
|
||||
});
|
||||
this.emit('vectors:deleted', { count: ids.length });
|
||||
}
|
||||
/**
|
||||
* Handle sync payload from other regions
|
||||
*/
|
||||
async handleSyncPayload(payload) {
|
||||
// Don't process our own sync messages
|
||||
if (payload.sourceRegion === this.config.region) {
|
||||
return;
|
||||
}
|
||||
console.log(`[RegionalAgent:${this.config.region}] Received sync payload from ${payload.sourceRegion}: ${payload.type}`);
|
||||
try {
|
||||
switch (payload.type) {
|
||||
case 'index':
|
||||
await this.indexVectors(payload.data);
|
||||
break;
|
||||
case 'update':
|
||||
await this.indexVectors(payload.data);
|
||||
break;
|
||||
case 'delete':
|
||||
await this.deleteVectors(payload.data);
|
||||
break;
|
||||
}
|
||||
this.emit('sync:applied', {
|
||||
type: payload.type,
|
||||
sourceRegion: payload.sourceRegion,
|
||||
});
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error applying sync payload:`, error);
|
||||
this.emit('sync:failed', {
|
||||
type: payload.type,
|
||||
sourceRegion: payload.sourceRegion,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
});
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Start metrics reporting loop
|
||||
*/
|
||||
startMetricsReporting() {
|
||||
this.metricsTimer = setInterval(() => {
|
||||
this.reportMetrics();
|
||||
}, this.config.metricsReportInterval);
|
||||
}
|
||||
/**
|
||||
* Report metrics to coordinator
|
||||
*/
|
||||
reportMetrics() {
|
||||
const metrics = {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
cpuUsage: this.getCpuUsage(),
|
||||
memoryUsage: this.getMemoryUsage(),
|
||||
activeStreams: this.activeStreams,
|
||||
queryLatency: this.totalQueries > 0 ? this.totalLatency / this.totalQueries : 0,
|
||||
timestamp: Date.now(),
|
||||
healthy: this.isHealthy(),
|
||||
};
|
||||
this.emit('metrics:report', metrics);
|
||||
// Reset counters (sliding window)
|
||||
if (this.totalQueries > 1000) {
|
||||
this.totalQueries = 0;
|
||||
this.totalLatency = 0;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Get CPU usage (placeholder)
|
||||
*/
|
||||
getCpuUsage() {
|
||||
// In production, this would read from /proc/stat or similar
|
||||
return Math.random() * 100;
|
||||
}
|
||||
/**
|
||||
* Get memory usage (placeholder)
|
||||
*/
|
||||
getMemoryUsage() {
|
||||
// In production, this would read from process.memoryUsage()
|
||||
const usage = process.memoryUsage();
|
||||
return (usage.heapUsed / usage.heapTotal) * 100;
|
||||
}
|
||||
/**
|
||||
* Check if agent is healthy
|
||||
*/
|
||||
isHealthy() {
|
||||
return (this.activeStreams < this.config.maxConcurrentStreams &&
|
||||
this.getMemoryUsage() < 90 &&
|
||||
this.getCpuUsage() < 90);
|
||||
}
|
||||
/**
|
||||
* Start sync process loop
|
||||
*/
|
||||
startSyncProcess() {
|
||||
this.syncTimer = setInterval(() => {
|
||||
this.processSyncQueue();
|
||||
}, this.config.syncInterval);
|
||||
}
|
||||
/**
|
||||
* Process sync queue (send to other regions)
|
||||
*/
|
||||
async processSyncQueue() {
|
||||
if (this.syncQueue.length === 0)
|
||||
return;
|
||||
const batch = this.syncQueue.splice(0, 100); // Process in batches
|
||||
console.log(`[RegionalAgent:${this.config.region}] Processing sync batch: ${batch.length} items`);
|
||||
for (const payload of batch) {
|
||||
this.emit('sync:broadcast', payload);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Get agent status
|
||||
*/
|
||||
getStatus() {
|
||||
return {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
healthy: this.isHealthy(),
|
||||
activeStreams: this.activeStreams,
|
||||
indexSize: this.localIndex.size,
|
||||
syncQueueSize: this.syncQueue.length,
|
||||
avgQueryLatency: this.totalQueries > 0 ? this.totalLatency / this.totalQueries : 0,
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Shutdown agent gracefully
|
||||
*/
|
||||
async shutdown() {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Shutting down agent ${this.config.agentId}...`);
|
||||
// Stop timers
|
||||
if (this.metricsTimer) {
|
||||
clearInterval(this.metricsTimer);
|
||||
}
|
||||
if (this.syncTimer) {
|
||||
clearInterval(this.syncTimer);
|
||||
}
|
||||
// Process remaining sync queue
|
||||
await this.processSyncQueue();
|
||||
// Save local index
|
||||
await this.saveLocalIndex();
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(`npx claude-flow@alpha hooks post-task --task-id "agent-${this.config.agentId}-shutdown"`);
|
||||
await execAsync(`npx claude-flow@alpha hooks session-end --export-metrics true`);
|
||||
}
|
||||
catch (error) {
|
||||
console.warn(`[RegionalAgent:${this.config.region}] Error executing shutdown hooks:`, error);
|
||||
}
|
||||
}
|
||||
this.emit('agent:shutdown', {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Save local index to persistent storage
|
||||
*/
|
||||
async saveLocalIndex() {
|
||||
try {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Saving local index to ${this.config.localStoragePath}`);
|
||||
// Placeholder for actual storage saving
|
||||
// In production, this would write to disk/database
|
||||
console.log(`[RegionalAgent:${this.config.region}] Local index saved: ${this.localIndex.size} vectors`);
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error saving local index:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.RegionalAgent = RegionalAgent;
|
||||
/**
|
||||
* Rate limiter for query processing
|
||||
*/
|
||||
class RateLimiter {
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.requests = 0;
|
||||
this.windowStart = Date.now();
|
||||
}
|
||||
tryAcquire() {
|
||||
const now = Date.now();
|
||||
// Reset window if expired
|
||||
if (now - this.windowStart >= this.config.windowMs) {
|
||||
this.requests = 0;
|
||||
this.windowStart = now;
|
||||
}
|
||||
if (this.requests < this.config.maxRequests) {
|
||||
this.requests++;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
release() {
|
||||
if (this.requests > 0) {
|
||||
this.requests--;
|
||||
}
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=regional-agent.js.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/regional-agent.js.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/regional-agent.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
601
vendor/ruvector/npm/packages/agentic-integration/regional-agent.ts
vendored
Normal file
601
vendor/ruvector/npm/packages/agentic-integration/regional-agent.ts
vendored
Normal file
@@ -0,0 +1,601 @@
|
||||
/**
|
||||
* Regional Agent - Per-region agent implementation for distributed processing
|
||||
*
|
||||
* Handles:
|
||||
* - Region-specific initialization
|
||||
* - Local query processing
|
||||
* - Cross-region communication
|
||||
* - State synchronization
|
||||
* - Metrics reporting
|
||||
*/
|
||||
|
||||
import { EventEmitter } from 'events';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
export interface RegionalAgentConfig {
|
||||
agentId: string;
|
||||
region: string;
|
||||
coordinatorEndpoint: string;
|
||||
localStoragePath: string;
|
||||
maxConcurrentStreams: number;
|
||||
metricsReportInterval: number;
|
||||
syncInterval: number;
|
||||
enableClaudeFlowHooks: boolean;
|
||||
vectorDimensions: number;
|
||||
capabilities: string[];
|
||||
}
|
||||
|
||||
export interface QueryRequest {
|
||||
id: string;
|
||||
vector: number[];
|
||||
topK: number;
|
||||
filters?: Record<string, any>;
|
||||
timeout: number;
|
||||
}
|
||||
|
||||
export interface QueryResult {
|
||||
id: string;
|
||||
matches: Array<{
|
||||
id: string;
|
||||
score: number;
|
||||
metadata: Record<string, any>;
|
||||
}>;
|
||||
latency: number;
|
||||
region: string;
|
||||
}
|
||||
|
||||
export interface SyncPayload {
|
||||
type: 'index' | 'update' | 'delete';
|
||||
data: any;
|
||||
timestamp: number;
|
||||
sourceRegion: string;
|
||||
}
|
||||
|
||||
export class RegionalAgent extends EventEmitter {
|
||||
private activeStreams = 0;
|
||||
private totalQueries = 0;
|
||||
private totalLatency = 0;
|
||||
private metricsTimer?: NodeJS.Timeout;
|
||||
private syncTimer?: NodeJS.Timeout;
|
||||
private localIndex: Map<string, any> = new Map();
|
||||
private syncQueue: SyncPayload[] = [];
|
||||
private rateLimiter: RateLimiter;
|
||||
|
||||
constructor(private config: RegionalAgentConfig) {
|
||||
super();
|
||||
this.rateLimiter = new RateLimiter({
|
||||
maxRequests: config.maxConcurrentStreams,
|
||||
windowMs: 1000,
|
||||
});
|
||||
this.initialize();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize regional agent
|
||||
*/
|
||||
private async initialize(): Promise<void> {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Initializing agent ${this.config.agentId}...`);
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Pre-task hook for agent initialization
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks pre-task --description "Initialize regional agent ${this.config.agentId} in ${this.config.region}"`
|
||||
);
|
||||
|
||||
// Restore session if available
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks session-restore --session-id "agent-${this.config.agentId}"`
|
||||
);
|
||||
|
||||
console.log(`[RegionalAgent:${this.config.region}] Claude-flow hooks initialized`);
|
||||
} catch (error) {
|
||||
console.warn(`[RegionalAgent:${this.config.region}] Claude-flow hooks not available:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
// Load local index from storage
|
||||
await this.loadLocalIndex();
|
||||
|
||||
// Start metrics reporting
|
||||
this.startMetricsReporting();
|
||||
|
||||
// Start sync process
|
||||
this.startSyncProcess();
|
||||
|
||||
// Register with coordinator
|
||||
await this.registerWithCoordinator();
|
||||
|
||||
this.emit('agent:initialized', {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
});
|
||||
|
||||
console.log(`[RegionalAgent:${this.config.region}] Agent ${this.config.agentId} initialized successfully`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Load local index from persistent storage
|
||||
*/
|
||||
private async loadLocalIndex(): Promise<void> {
|
||||
try {
|
||||
// Placeholder for actual storage loading
|
||||
// In production, this would load from disk/database
|
||||
console.log(`[RegionalAgent:${this.config.region}] Loading local index from ${this.config.localStoragePath}`);
|
||||
|
||||
// Simulate loading
|
||||
this.localIndex.clear();
|
||||
|
||||
console.log(`[RegionalAgent:${this.config.region}] Local index loaded: ${this.localIndex.size} vectors`);
|
||||
} catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error loading local index:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register with coordinator
|
||||
*/
|
||||
private async registerWithCoordinator(): Promise<void> {
|
||||
try {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Registering with coordinator at ${this.config.coordinatorEndpoint}`);
|
||||
|
||||
// In production, this would be an HTTP/gRPC call
|
||||
// For now, emit event
|
||||
this.emit('coordinator:register', {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
endpoint: `https://${this.config.region}.ruvector.io/agent/${this.config.agentId}`,
|
||||
capabilities: this.config.capabilities,
|
||||
capacity: this.config.maxConcurrentStreams,
|
||||
registeredAt: Date.now(),
|
||||
});
|
||||
|
||||
console.log(`[RegionalAgent:${this.config.region}] Successfully registered with coordinator`);
|
||||
} catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Failed to register with coordinator:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Process query request locally
|
||||
*/
|
||||
async processQuery(request: QueryRequest): Promise<QueryResult> {
|
||||
const startTime = Date.now();
|
||||
|
||||
// Check rate limit
|
||||
if (!this.rateLimiter.tryAcquire()) {
|
||||
throw new Error('Rate limit exceeded');
|
||||
}
|
||||
|
||||
this.activeStreams++;
|
||||
this.totalQueries++;
|
||||
|
||||
try {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Processing query ${request.id}`);
|
||||
|
||||
// Validate query
|
||||
this.validateQuery(request);
|
||||
|
||||
// Execute vector search
|
||||
const matches = await this.searchVectors(request);
|
||||
|
||||
const latency = Date.now() - startTime;
|
||||
this.totalLatency += latency;
|
||||
|
||||
const result: QueryResult = {
|
||||
id: request.id,
|
||||
matches,
|
||||
latency,
|
||||
region: this.config.region,
|
||||
};
|
||||
|
||||
this.emit('query:completed', {
|
||||
queryId: request.id,
|
||||
latency,
|
||||
matchCount: matches.length,
|
||||
});
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Notify about query completion
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks notify --message "Query ${request.id} completed in ${latency}ms with ${matches.length} matches"`
|
||||
);
|
||||
} catch (error) {
|
||||
// Non-critical error
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
|
||||
} catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error processing query ${request.id}:`, error);
|
||||
|
||||
this.emit('query:failed', {
|
||||
queryId: request.id,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
});
|
||||
|
||||
throw error;
|
||||
|
||||
} finally {
|
||||
this.activeStreams--;
|
||||
this.rateLimiter.release();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate query request
|
||||
*/
|
||||
private validateQuery(request: QueryRequest): void {
|
||||
if (!request.vector || request.vector.length !== this.config.vectorDimensions) {
|
||||
throw new Error(
|
||||
`Invalid vector dimensions: expected ${this.config.vectorDimensions}, got ${request.vector?.length || 0}`
|
||||
);
|
||||
}
|
||||
|
||||
if (request.topK <= 0 || request.topK > 1000) {
|
||||
throw new Error(`Invalid topK value: ${request.topK} (must be between 1 and 1000)`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Search vectors in local index
|
||||
*/
|
||||
private async searchVectors(request: QueryRequest): Promise<QueryResult['matches']> {
|
||||
// Placeholder for actual vector search
|
||||
// In production, this would use FAISS, Annoy, or similar library
|
||||
|
||||
const matches: QueryResult['matches'] = [];
|
||||
|
||||
// Simulate vector search
|
||||
for (const [id, vector] of this.localIndex.entries()) {
|
||||
const score = this.calculateSimilarity(request.vector, vector);
|
||||
|
||||
// Apply filters if present
|
||||
if (request.filters && !this.matchesFilters(vector.metadata, request.filters)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
matches.push({
|
||||
id,
|
||||
score,
|
||||
metadata: vector.metadata || {},
|
||||
});
|
||||
}
|
||||
|
||||
// Sort by score and return top-k
|
||||
matches.sort((a, b) => b.score - a.score);
|
||||
return matches.slice(0, request.topK);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate cosine similarity between vectors
|
||||
*/
|
||||
private calculateSimilarity(v1: number[], v2: number[]): number {
|
||||
let dotProduct = 0;
|
||||
let norm1 = 0;
|
||||
let norm2 = 0;
|
||||
|
||||
for (let i = 0; i < v1.length; i++) {
|
||||
dotProduct += v1[i] * v2[i];
|
||||
norm1 += v1[i] * v1[i];
|
||||
norm2 += v2[i] * v2[i];
|
||||
}
|
||||
|
||||
return dotProduct / (Math.sqrt(norm1) * Math.sqrt(norm2));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if metadata matches filters
|
||||
*/
|
||||
private matchesFilters(metadata: Record<string, any>, filters: Record<string, any>): boolean {
|
||||
for (const [key, value] of Object.entries(filters)) {
|
||||
if (metadata[key] !== value) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add/update vectors in local index
|
||||
*/
|
||||
async indexVectors(vectors: Array<{ id: string; vector: number[]; metadata?: Record<string, any> }>): Promise<void> {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Indexing ${vectors.length} vectors`);
|
||||
|
||||
for (const { id, vector, metadata } of vectors) {
|
||||
this.localIndex.set(id, { vector, metadata });
|
||||
}
|
||||
|
||||
// Queue for cross-region sync
|
||||
this.syncQueue.push({
|
||||
type: 'index',
|
||||
data: vectors,
|
||||
timestamp: Date.now(),
|
||||
sourceRegion: this.config.region,
|
||||
});
|
||||
|
||||
this.emit('vectors:indexed', { count: vectors.length });
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks post-edit --file "local-index" --memory-key "swarm/${this.config.agentId}/index-update"`
|
||||
);
|
||||
} catch (error) {
|
||||
// Non-critical
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete vectors from local index
|
||||
*/
|
||||
async deleteVectors(ids: string[]): Promise<void> {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Deleting ${ids.length} vectors`);
|
||||
|
||||
for (const id of ids) {
|
||||
this.localIndex.delete(id);
|
||||
}
|
||||
|
||||
// Queue for cross-region sync
|
||||
this.syncQueue.push({
|
||||
type: 'delete',
|
||||
data: ids,
|
||||
timestamp: Date.now(),
|
||||
sourceRegion: this.config.region,
|
||||
});
|
||||
|
||||
this.emit('vectors:deleted', { count: ids.length });
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle sync payload from other regions
|
||||
*/
|
||||
async handleSyncPayload(payload: SyncPayload): Promise<void> {
|
||||
// Don't process our own sync messages
|
||||
if (payload.sourceRegion === this.config.region) {
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[RegionalAgent:${this.config.region}] Received sync payload from ${payload.sourceRegion}: ${payload.type}`
|
||||
);
|
||||
|
||||
try {
|
||||
switch (payload.type) {
|
||||
case 'index':
|
||||
await this.indexVectors(payload.data);
|
||||
break;
|
||||
case 'update':
|
||||
await this.indexVectors(payload.data);
|
||||
break;
|
||||
case 'delete':
|
||||
await this.deleteVectors(payload.data);
|
||||
break;
|
||||
}
|
||||
|
||||
this.emit('sync:applied', {
|
||||
type: payload.type,
|
||||
sourceRegion: payload.sourceRegion,
|
||||
});
|
||||
|
||||
} catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error applying sync payload:`, error);
|
||||
|
||||
this.emit('sync:failed', {
|
||||
type: payload.type,
|
||||
sourceRegion: payload.sourceRegion,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start metrics reporting loop
|
||||
*/
|
||||
private startMetricsReporting(): void {
|
||||
this.metricsTimer = setInterval(() => {
|
||||
this.reportMetrics();
|
||||
}, this.config.metricsReportInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Report metrics to coordinator
|
||||
*/
|
||||
private reportMetrics(): void {
|
||||
const metrics = {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
cpuUsage: this.getCpuUsage(),
|
||||
memoryUsage: this.getMemoryUsage(),
|
||||
activeStreams: this.activeStreams,
|
||||
queryLatency: this.totalQueries > 0 ? this.totalLatency / this.totalQueries : 0,
|
||||
timestamp: Date.now(),
|
||||
healthy: this.isHealthy(),
|
||||
};
|
||||
|
||||
this.emit('metrics:report', metrics);
|
||||
|
||||
// Reset counters (sliding window)
|
||||
if (this.totalQueries > 1000) {
|
||||
this.totalQueries = 0;
|
||||
this.totalLatency = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get CPU usage (placeholder)
|
||||
*/
|
||||
private getCpuUsage(): number {
|
||||
// In production, this would read from /proc/stat or similar
|
||||
return Math.random() * 100;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get memory usage (placeholder)
|
||||
*/
|
||||
private getMemoryUsage(): number {
|
||||
// In production, this would read from process.memoryUsage()
|
||||
const usage = process.memoryUsage();
|
||||
return (usage.heapUsed / usage.heapTotal) * 100;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if agent is healthy
|
||||
*/
|
||||
private isHealthy(): boolean {
|
||||
return (
|
||||
this.activeStreams < this.config.maxConcurrentStreams &&
|
||||
this.getMemoryUsage() < 90 &&
|
||||
this.getCpuUsage() < 90
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Start sync process loop
|
||||
*/
|
||||
private startSyncProcess(): void {
|
||||
this.syncTimer = setInterval(() => {
|
||||
this.processSyncQueue();
|
||||
}, this.config.syncInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Process sync queue (send to other regions)
|
||||
*/
|
||||
private async processSyncQueue(): Promise<void> {
|
||||
if (this.syncQueue.length === 0) return;
|
||||
|
||||
const batch = this.syncQueue.splice(0, 100); // Process in batches
|
||||
|
||||
console.log(`[RegionalAgent:${this.config.region}] Processing sync batch: ${batch.length} items`);
|
||||
|
||||
for (const payload of batch) {
|
||||
this.emit('sync:broadcast', payload);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get agent status
|
||||
*/
|
||||
getStatus(): {
|
||||
agentId: string;
|
||||
region: string;
|
||||
healthy: boolean;
|
||||
activeStreams: number;
|
||||
indexSize: number;
|
||||
syncQueueSize: number;
|
||||
avgQueryLatency: number;
|
||||
} {
|
||||
return {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
healthy: this.isHealthy(),
|
||||
activeStreams: this.activeStreams,
|
||||
indexSize: this.localIndex.size,
|
||||
syncQueueSize: this.syncQueue.length,
|
||||
avgQueryLatency: this.totalQueries > 0 ? this.totalLatency / this.totalQueries : 0,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown agent gracefully
|
||||
*/
|
||||
async shutdown(): Promise<void> {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Shutting down agent ${this.config.agentId}...`);
|
||||
|
||||
// Stop timers
|
||||
if (this.metricsTimer) {
|
||||
clearInterval(this.metricsTimer);
|
||||
}
|
||||
if (this.syncTimer) {
|
||||
clearInterval(this.syncTimer);
|
||||
}
|
||||
|
||||
// Process remaining sync queue
|
||||
await this.processSyncQueue();
|
||||
|
||||
// Save local index
|
||||
await this.saveLocalIndex();
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks post-task --task-id "agent-${this.config.agentId}-shutdown"`
|
||||
);
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks session-end --export-metrics true`
|
||||
);
|
||||
} catch (error) {
|
||||
console.warn(`[RegionalAgent:${this.config.region}] Error executing shutdown hooks:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('agent:shutdown', {
|
||||
agentId: this.config.agentId,
|
||||
region: this.config.region,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Save local index to persistent storage
|
||||
*/
|
||||
private async saveLocalIndex(): Promise<void> {
|
||||
try {
|
||||
console.log(`[RegionalAgent:${this.config.region}] Saving local index to ${this.config.localStoragePath}`);
|
||||
|
||||
// Placeholder for actual storage saving
|
||||
// In production, this would write to disk/database
|
||||
|
||||
console.log(`[RegionalAgent:${this.config.region}] Local index saved: ${this.localIndex.size} vectors`);
|
||||
} catch (error) {
|
||||
console.error(`[RegionalAgent:${this.config.region}] Error saving local index:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Rate limiter for query processing
|
||||
*/
|
||||
class RateLimiter {
|
||||
private requests = 0;
|
||||
private windowStart = Date.now();
|
||||
|
||||
constructor(
|
||||
private config: {
|
||||
maxRequests: number;
|
||||
windowMs: number;
|
||||
}
|
||||
) {}
|
||||
|
||||
tryAcquire(): boolean {
|
||||
const now = Date.now();
|
||||
|
||||
// Reset window if expired
|
||||
if (now - this.windowStart >= this.config.windowMs) {
|
||||
this.requests = 0;
|
||||
this.windowStart = now;
|
||||
}
|
||||
|
||||
if (this.requests < this.config.maxRequests) {
|
||||
this.requests++;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
release(): void {
|
||||
if (this.requests > 0) {
|
||||
this.requests--;
|
||||
}
|
||||
}
|
||||
}
|
||||
144
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.d.ts
vendored
Normal file
144
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.d.ts
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
/**
|
||||
* Swarm Manager - Dynamic agent swarm management
|
||||
*
|
||||
* Handles:
|
||||
* - Dynamic agent spawning based on load
|
||||
* - Agent lifecycle management
|
||||
* - Topology management (mesh coordination)
|
||||
* - Memory/state sharing via claude-flow hooks
|
||||
*/
|
||||
import { EventEmitter } from 'events';
|
||||
import { AgentCoordinator } from './agent-coordinator';
|
||||
export interface SwarmConfig {
|
||||
topology: 'mesh' | 'hierarchical' | 'hybrid';
|
||||
minAgentsPerRegion: number;
|
||||
maxAgentsPerRegion: number;
|
||||
scaleUpThreshold: number;
|
||||
scaleDownThreshold: number;
|
||||
scaleUpCooldown: number;
|
||||
scaleDownCooldown: number;
|
||||
healthCheckInterval: number;
|
||||
enableAutoScaling: boolean;
|
||||
enableClaudeFlowHooks: boolean;
|
||||
regions: string[];
|
||||
}
|
||||
export interface SwarmMetrics {
|
||||
totalAgents: number;
|
||||
activeAgents: number;
|
||||
totalLoad: number;
|
||||
averageLoad: number;
|
||||
regionMetrics: Record<string, RegionMetrics>;
|
||||
timestamp: number;
|
||||
}
|
||||
export interface RegionMetrics {
|
||||
region: string;
|
||||
agentCount: number;
|
||||
activeAgents: number;
|
||||
avgCpuUsage: number;
|
||||
avgMemoryUsage: number;
|
||||
totalStreams: number;
|
||||
avgQueryLatency: number;
|
||||
}
|
||||
export declare class SwarmManager extends EventEmitter {
|
||||
private config;
|
||||
private coordinator;
|
||||
private agents;
|
||||
private agentConfigs;
|
||||
private lastScaleUp;
|
||||
private lastScaleDown;
|
||||
private healthCheckTimer?;
|
||||
private autoScaleTimer?;
|
||||
private swarmMemory;
|
||||
private agentCounter;
|
||||
constructor(config: SwarmConfig, coordinator: AgentCoordinator);
|
||||
/**
|
||||
* Initialize swarm manager
|
||||
*/
|
||||
private initialize;
|
||||
/**
|
||||
* Spawn initial agents for each region
|
||||
*/
|
||||
private spawnInitialAgents;
|
||||
/**
|
||||
* Spawn a new agent in specific region
|
||||
*/
|
||||
spawnAgent(region: string, capacity?: number): Promise<string>;
|
||||
/**
|
||||
* Set up event handlers for agent
|
||||
*/
|
||||
private setupAgentEventHandlers;
|
||||
/**
|
||||
* Handle sync broadcast from agent
|
||||
*/
|
||||
private handleSyncBroadcast;
|
||||
/**
|
||||
* Despawn an agent
|
||||
*/
|
||||
despawnAgent(agentId: string): Promise<void>;
|
||||
/**
|
||||
* Handle agent shutdown
|
||||
*/
|
||||
private handleAgentShutdown;
|
||||
/**
|
||||
* Start health monitoring
|
||||
*/
|
||||
private startHealthMonitoring;
|
||||
/**
|
||||
* Perform health checks on all agents
|
||||
*/
|
||||
private performHealthChecks;
|
||||
/**
|
||||
* Start auto-scaling
|
||||
*/
|
||||
private startAutoScaling;
|
||||
/**
|
||||
* Evaluate if scaling is needed
|
||||
*/
|
||||
private evaluateScaling;
|
||||
/**
|
||||
* Check if can scale up (respects cooldown)
|
||||
*/
|
||||
private canScaleUp;
|
||||
/**
|
||||
* Check if can scale down (respects cooldown)
|
||||
*/
|
||||
private canScaleDown;
|
||||
/**
|
||||
* Scale up agents in region
|
||||
*/
|
||||
private scaleUp;
|
||||
/**
|
||||
* Scale down agents in region
|
||||
*/
|
||||
private scaleDown;
|
||||
/**
|
||||
* Calculate swarm metrics
|
||||
*/
|
||||
calculateSwarmMetrics(): SwarmMetrics;
|
||||
/**
|
||||
* Store data in swarm memory via claude-flow hooks
|
||||
*/
|
||||
private storeInMemory;
|
||||
/**
|
||||
* Retrieve data from swarm memory
|
||||
*/
|
||||
private retrieveFromMemory;
|
||||
/**
|
||||
* Remove data from swarm memory
|
||||
*/
|
||||
private removeFromMemory;
|
||||
/**
|
||||
* Get swarm status
|
||||
*/
|
||||
getStatus(): {
|
||||
topology: string;
|
||||
regions: string[];
|
||||
totalAgents: number;
|
||||
metrics: SwarmMetrics;
|
||||
};
|
||||
/**
|
||||
* Shutdown swarm gracefully
|
||||
*/
|
||||
shutdown(): Promise<void>;
|
||||
}
|
||||
//# sourceMappingURL=swarm-manager.d.ts.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.d.ts.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"swarm-manager.d.ts","sourceRoot":"","sources":["swarm-manager.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH,OAAO,EAAE,YAAY,EAAE,MAAM,QAAQ,CAAC;AAItC,OAAO,EAAE,gBAAgB,EAAqB,MAAM,qBAAqB,CAAC;AAI1E,MAAM,WAAW,WAAW;IAC1B,QAAQ,EAAE,MAAM,GAAG,cAAc,GAAG,QAAQ,CAAC;IAC7C,kBAAkB,EAAE,MAAM,CAAC;IAC3B,kBAAkB,EAAE,MAAM,CAAC;IAC3B,gBAAgB,EAAE,MAAM,CAAC;IACzB,kBAAkB,EAAE,MAAM,CAAC;IAC3B,eAAe,EAAE,MAAM,CAAC;IACxB,iBAAiB,EAAE,MAAM,CAAC;IAC1B,mBAAmB,EAAE,MAAM,CAAC;IAC5B,iBAAiB,EAAE,OAAO,CAAC;IAC3B,qBAAqB,EAAE,OAAO,CAAC;IAC/B,OAAO,EAAE,MAAM,EAAE,CAAC;CACnB;AAED,MAAM,WAAW,YAAY;IAC3B,WAAW,EAAE,MAAM,CAAC;IACpB,YAAY,EAAE,MAAM,CAAC;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,WAAW,EAAE,MAAM,CAAC;IACpB,aAAa,EAAE,MAAM,CAAC,MAAM,EAAE,aAAa,CAAC,CAAC;IAC7C,SAAS,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,WAAW,aAAa;IAC5B,MAAM,EAAE,MAAM,CAAC;IACf,UAAU,EAAE,MAAM,CAAC;IACnB,YAAY,EAAE,MAAM,CAAC;IACrB,WAAW,EAAE,MAAM,CAAC;IACpB,cAAc,EAAE,MAAM,CAAC;IACvB,YAAY,EAAE,MAAM,CAAC;IACrB,eAAe,EAAE,MAAM,CAAC;CACzB;AAED,qBAAa,YAAa,SAAQ,YAAY;IAW1C,OAAO,CAAC,MAAM;IACd,OAAO,CAAC,WAAW;IAXrB,OAAO,CAAC,MAAM,CAAyC;IACvD,OAAO,CAAC,YAAY,CAA+C;IACnE,OAAO,CAAC,WAAW,CAAkC;IACrD,OAAO,CAAC,aAAa,CAAkC;IACvD,OAAO,CAAC,gBAAgB,CAAC,CAAiB;IAC1C,OAAO,CAAC,cAAc,CAAC,CAAiB;IACxC,OAAO,CAAC,WAAW,CAA+B;IAClD,OAAO,CAAC,YAAY,CAAK;gBAGf,MAAM,EAAE,WAAW,EACnB,WAAW,EAAE,gBAAgB;IAMvC;;OAEG;YACW,UAAU;IAmDxB;;OAEG;YACW,kBAAkB;IAgBhC;;OAEG;IACG,UAAU,CAAC,MAAM,EAAE,MAAM,EAAE,QAAQ,GAAE,MAAa,GAAG,OAAO,CAAC,MAAM,CAAC;IA+D1E;;OAEG;IACH,OAAO,CAAC,uBAAuB;IAuB/B;;OAEG;YACW,mBAAmB;IAejC;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAkClD;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAS3B;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAM7B;;OAEG;YACW,mBAAmB;IAyBjC;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAMxB;;OAEG;YACW,eAAe;IA4B7B;;OAEG;IACH,OAAO,CAAC,UAAU;IAKlB;;OAEG;IACH,OAAO,CAAC,YAAY;IAKpB;;OAEG;YACW,OAAO;IAWrB;;OAEG;YACW,SAAS;IA2BvB;;OAEG;IACH,qBAAqB,IAAI,YAAY;IA6DrC;;OAEG;YACW,aAAa;IAe3B;;OAEG;YACW,kBAAkB;IAIhC;;OAEG;YACW,gBAAgB;IAI9B;;OAEG;IACH,SAAS,IAAI;QACX,QAAQ,EAAE,MAAM,CAAC;Q
ACjB,OAAO,EAAE,MAAM,EAAE,CAAC;QAClB,WAAW,EAAE,MAAM,CAAC;QACpB,OAAO,EAAE,YAAY,CAAC;KACvB;IASD;;OAEG;IACG,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;CAmChC"}
|
||||
453
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.js
vendored
Normal file
453
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.js
vendored
Normal file
@@ -0,0 +1,453 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Swarm Manager - Dynamic agent swarm management
|
||||
*
|
||||
* Handles:
|
||||
* - Dynamic agent spawning based on load
|
||||
* - Agent lifecycle management
|
||||
* - Topology management (mesh coordination)
|
||||
* - Memory/state sharing via claude-flow hooks
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SwarmManager = void 0;
|
||||
const events_1 = require("events");
|
||||
const child_process_1 = require("child_process");
|
||||
const util_1 = require("util");
|
||||
const regional_agent_1 = require("./regional-agent");
|
||||
const execAsync = (0, util_1.promisify)(child_process_1.exec);
|
||||
class SwarmManager extends events_1.EventEmitter {
|
||||
constructor(config, coordinator) {
|
||||
super();
|
||||
this.config = config;
|
||||
this.coordinator = coordinator;
|
||||
this.agents = new Map();
|
||||
this.agentConfigs = new Map();
|
||||
this.lastScaleUp = new Map();
|
||||
this.lastScaleDown = new Map();
|
||||
this.swarmMemory = new Map();
|
||||
this.agentCounter = 0;
|
||||
this.initialize();
|
||||
}
|
||||
/**
|
||||
* Initialize swarm manager
|
||||
*/
|
||||
async initialize() {
|
||||
console.log('[SwarmManager] Initializing swarm manager...');
|
||||
console.log(`[SwarmManager] Topology: ${this.config.topology}`);
|
||||
console.log(`[SwarmManager] Regions: ${this.config.regions.join(', ')}`);
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Initialize swarm coordination via claude-flow
|
||||
await execAsync(`npx claude-flow@alpha hooks pre-task --description "Initialize swarm manager with ${this.config.topology} topology"`);
|
||||
// Initialize swarm topology
|
||||
const topologyCmd = JSON.stringify({
|
||||
topology: this.config.topology,
|
||||
maxAgents: this.config.maxAgentsPerRegion * this.config.regions.length,
|
||||
}).replace(/"/g, '\\"');
|
||||
console.log('[SwarmManager] Initializing claude-flow swarm coordination...');
|
||||
// Store swarm configuration in memory
|
||||
await this.storeInMemory('swarm/config', this.config);
|
||||
console.log('[SwarmManager] Claude-flow hooks initialized');
|
||||
}
|
||||
catch (error) {
|
||||
console.warn('[SwarmManager] Claude-flow hooks not available:', error);
|
||||
}
|
||||
}
|
||||
// Spawn initial agents for each region
|
||||
await this.spawnInitialAgents();
|
||||
// Start health monitoring
|
||||
if (this.config.healthCheckInterval > 0) {
|
||||
this.startHealthMonitoring();
|
||||
}
|
||||
// Start auto-scaling
|
||||
if (this.config.enableAutoScaling) {
|
||||
this.startAutoScaling();
|
||||
}
|
||||
this.emit('swarm:initialized', {
|
||||
topology: this.config.topology,
|
||||
regions: this.config.regions,
|
||||
initialAgents: this.agents.size,
|
||||
});
|
||||
console.log(`[SwarmManager] Swarm initialized with ${this.agents.size} agents`);
|
||||
}
|
||||
/**
|
||||
* Spawn initial agents for each region
|
||||
*/
|
||||
async spawnInitialAgents() {
|
||||
console.log('[SwarmManager] Spawning initial agents...');
|
||||
const spawnPromises = [];
|
||||
for (const region of this.config.regions) {
|
||||
for (let i = 0; i < this.config.minAgentsPerRegion; i++) {
|
||||
spawnPromises.push(this.spawnAgent(region));
|
||||
}
|
||||
}
|
||||
await Promise.all(spawnPromises);
|
||||
console.log(`[SwarmManager] Spawned ${this.agents.size} initial agents`);
|
||||
}
|
||||
/**
|
||||
* Spawn a new agent in specific region
|
||||
*/
|
||||
async spawnAgent(region, capacity = 1000) {
|
||||
const agentId = `agent-${region}-${this.agentCounter++}`;
|
||||
console.log(`[SwarmManager] Spawning agent ${agentId} in ${region}`);
|
||||
const agentConfig = {
|
||||
agentId,
|
||||
region,
|
||||
coordinatorEndpoint: 'coordinator.ruvector.io',
|
||||
localStoragePath: `/var/lib/ruvector/${region}/${agentId}`,
|
||||
maxConcurrentStreams: 1000,
|
||||
metricsReportInterval: 30000, // 30 seconds
|
||||
syncInterval: 5000, // 5 seconds
|
||||
enableClaudeFlowHooks: this.config.enableClaudeFlowHooks,
|
||||
vectorDimensions: 768, // Default dimension
|
||||
capabilities: ['query', 'index', 'sync'],
|
||||
};
|
||||
// Create agent instance
|
||||
const agent = new regional_agent_1.RegionalAgent(agentConfig);
|
||||
// Set up event handlers
|
||||
this.setupAgentEventHandlers(agent, agentConfig);
|
||||
// Store agent
|
||||
this.agents.set(agentId, agent);
|
||||
this.agentConfigs.set(agentId, agentConfig);
|
||||
// Register with coordinator
|
||||
const registration = {
|
||||
agentId,
|
||||
region,
|
||||
endpoint: `https://${region}.ruvector.io/agent/${agentId}`,
|
||||
capabilities: agentConfig.capabilities,
|
||||
capacity,
|
||||
registeredAt: Date.now(),
|
||||
};
|
||||
await this.coordinator.registerAgent(registration);
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Notify about agent spawn
|
||||
await execAsync(`npx claude-flow@alpha hooks notify --message "Spawned agent ${agentId} in ${region}"`);
|
||||
// Store agent info in swarm memory
|
||||
await this.storeInMemory(`swarm/agents/${agentId}`, {
|
||||
config: agentConfig,
|
||||
registration,
|
||||
spawnedAt: Date.now(),
|
||||
});
|
||||
}
|
||||
catch (error) {
|
||||
// Non-critical
|
||||
}
|
||||
}
|
||||
this.emit('agent:spawned', { agentId, region });
|
||||
return agentId;
|
||||
}
|
||||
/**
|
||||
* Set up event handlers for agent
|
||||
*/
|
||||
setupAgentEventHandlers(agent, config) {
|
||||
// Forward agent events to swarm manager
|
||||
agent.on('metrics:report', (metrics) => {
|
||||
this.coordinator.updateAgentMetrics(metrics);
|
||||
});
|
||||
agent.on('query:completed', (data) => {
|
||||
this.emit('query:completed', { ...data, agentId: config.agentId });
|
||||
});
|
||||
agent.on('query:failed', (data) => {
|
||||
this.emit('query:failed', { ...data, agentId: config.agentId });
|
||||
});
|
||||
agent.on('sync:broadcast', (payload) => {
|
||||
this.handleSyncBroadcast(payload, config.region);
|
||||
});
|
||||
agent.on('agent:shutdown', () => {
|
||||
this.handleAgentShutdown(config.agentId);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Handle sync broadcast from agent
|
||||
*/
|
||||
async handleSyncBroadcast(payload, sourceRegion) {
|
||||
// Broadcast to all agents in other regions
|
||||
for (const [agentId, agent] of this.agents.entries()) {
|
||||
const agentConfig = this.agentConfigs.get(agentId);
|
||||
if (agentConfig && agentConfig.region !== sourceRegion) {
|
||||
try {
|
||||
await agent.handleSyncPayload(payload);
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[SwarmManager] Error syncing to agent ${agentId}:`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Despawn an agent
|
||||
*/
|
||||
async despawnAgent(agentId) {
|
||||
console.log(`[SwarmManager] Despawning agent ${agentId}`);
|
||||
const agent = this.agents.get(agentId);
|
||||
if (!agent) {
|
||||
throw new Error(`Agent ${agentId} not found`);
|
||||
}
|
||||
// Unregister from coordinator
|
||||
await this.coordinator.unregisterAgent(agentId);
|
||||
// Shutdown agent
|
||||
await agent.shutdown();
|
||||
// Remove from tracking
|
||||
this.agents.delete(agentId);
|
||||
this.agentConfigs.delete(agentId);
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(`npx claude-flow@alpha hooks notify --message "Despawned agent ${agentId}"`);
|
||||
// Remove from swarm memory
|
||||
await this.removeFromMemory(`swarm/agents/${agentId}`);
|
||||
}
|
||||
catch (error) {
|
||||
// Non-critical
|
||||
}
|
||||
}
|
||||
this.emit('agent:despawned', { agentId });
|
||||
}
|
||||
/**
|
||||
* Handle agent shutdown
|
||||
*/
|
||||
handleAgentShutdown(agentId) {
|
||||
console.log(`[SwarmManager] Agent ${agentId} has shut down`);
|
||||
this.agents.delete(agentId);
|
||||
this.agentConfigs.delete(agentId);
|
||||
this.emit('agent:shutdown', { agentId });
|
||||
}
|
||||
/**
|
||||
* Start health monitoring
|
||||
*/
|
||||
startHealthMonitoring() {
|
||||
this.healthCheckTimer = setInterval(() => {
|
||||
this.performHealthChecks();
|
||||
}, this.config.healthCheckInterval);
|
||||
}
|
||||
/**
|
||||
* Perform health checks on all agents
|
||||
*/
|
||||
async performHealthChecks() {
|
||||
const unhealthyAgents = [];
|
||||
for (const [agentId, agent] of this.agents.entries()) {
|
||||
const status = agent.getStatus();
|
||||
if (!status.healthy) {
|
||||
unhealthyAgents.push(agentId);
|
||||
console.warn(`[SwarmManager] Agent ${agentId} is unhealthy`);
|
||||
}
|
||||
}
|
||||
if (unhealthyAgents.length > 0) {
|
||||
this.emit('health:check', {
|
||||
unhealthyAgents,
|
||||
totalAgents: this.agents.size,
|
||||
});
|
||||
}
|
||||
// Could implement auto-recovery here
|
||||
// for (const agentId of unhealthyAgents) {
|
||||
// await this.recoverAgent(agentId);
|
||||
// }
|
||||
}
|
||||
/**
|
||||
* Start auto-scaling
|
||||
*/
|
||||
startAutoScaling() {
|
||||
this.autoScaleTimer = setInterval(() => {
|
||||
this.evaluateScaling();
|
||||
}, 10000); // Evaluate every 10 seconds
|
||||
}
|
||||
/**
|
||||
* Evaluate if scaling is needed
|
||||
*/
|
||||
async evaluateScaling() {
|
||||
const metrics = this.calculateSwarmMetrics();
|
||||
for (const [region, regionMetrics] of Object.entries(metrics.regionMetrics)) {
|
||||
const avgLoad = (regionMetrics.avgCpuUsage + regionMetrics.avgMemoryUsage) / 2;
|
||||
// Check scale-up condition
|
||||
if (avgLoad > this.config.scaleUpThreshold &&
|
||||
regionMetrics.agentCount < this.config.maxAgentsPerRegion &&
|
||||
this.canScaleUp(region)) {
|
||||
console.log(`[SwarmManager] Scaling up in region ${region} (load: ${avgLoad.toFixed(1)}%)`);
|
||||
await this.scaleUp(region);
|
||||
}
|
||||
// Check scale-down condition
|
||||
if (avgLoad < this.config.scaleDownThreshold &&
|
||||
regionMetrics.agentCount > this.config.minAgentsPerRegion &&
|
||||
this.canScaleDown(region)) {
|
||||
console.log(`[SwarmManager] Scaling down in region ${region} (load: ${avgLoad.toFixed(1)}%)`);
|
||||
await this.scaleDown(region);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Check if can scale up (respects cooldown)
|
||||
*/
|
||||
canScaleUp(region) {
|
||||
const lastScaleUp = this.lastScaleUp.get(region) || 0;
|
||||
return Date.now() - lastScaleUp > this.config.scaleUpCooldown;
|
||||
}
|
||||
/**
|
||||
* Check if can scale down (respects cooldown)
|
||||
*/
|
||||
canScaleDown(region) {
|
||||
const lastScaleDown = this.lastScaleDown.get(region) || 0;
|
||||
return Date.now() - lastScaleDown > this.config.scaleDownCooldown;
|
||||
}
|
||||
/**
|
||||
* Scale up agents in region
|
||||
*/
|
||||
async scaleUp(region) {
|
||||
try {
|
||||
await this.spawnAgent(region);
|
||||
this.lastScaleUp.set(region, Date.now());
|
||||
this.emit('swarm:scale-up', { region, totalAgents: this.agents.size });
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[SwarmManager] Error scaling up in ${region}:`, error);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Scale down agents in region
|
||||
*/
|
||||
async scaleDown(region) {
|
||||
// Find agent with lowest load in region
|
||||
const regionAgents = Array.from(this.agents.entries())
|
||||
.filter(([_, agent]) => {
|
||||
const config = this.agentConfigs.get(agent.getStatus().agentId);
|
||||
return config?.region === region;
|
||||
})
|
||||
.map(([agentId, agent]) => ({
|
||||
agentId,
|
||||
status: agent.getStatus(),
|
||||
}))
|
||||
.sort((a, b) => a.status.activeStreams - b.status.activeStreams);
|
||||
if (regionAgents.length > 0) {
|
||||
const agentToDespawn = regionAgents[0];
|
||||
try {
|
||||
await this.despawnAgent(agentToDespawn.agentId);
|
||||
this.lastScaleDown.set(region, Date.now());
|
||||
this.emit('swarm:scale-down', { region, totalAgents: this.agents.size });
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`[SwarmManager] Error scaling down in ${region}:`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Calculate swarm metrics
|
||||
*/
|
||||
calculateSwarmMetrics() {
|
||||
const regionMetrics = {};
|
||||
let totalLoad = 0;
|
||||
let activeAgents = 0;
|
||||
// Initialize region metrics
|
||||
for (const region of this.config.regions) {
|
||||
regionMetrics[region] = {
|
||||
region,
|
||||
agentCount: 0,
|
||||
activeAgents: 0,
|
||||
avgCpuUsage: 0,
|
||||
avgMemoryUsage: 0,
|
||||
totalStreams: 0,
|
||||
avgQueryLatency: 0,
|
||||
};
|
||||
}
|
||||
// Aggregate metrics
|
||||
for (const [agentId, agent] of this.agents.entries()) {
|
||||
const status = agent.getStatus();
|
||||
const config = this.agentConfigs.get(agentId);
|
||||
if (!config)
|
||||
continue;
|
||||
const regionMetric = regionMetrics[config.region];
|
||||
regionMetric.agentCount++;
|
||||
if (status.healthy) {
|
||||
activeAgents++;
|
||||
regionMetric.activeAgents++;
|
||||
}
|
||||
regionMetric.totalStreams += status.activeStreams;
|
||||
regionMetric.avgQueryLatency += status.avgQueryLatency;
|
||||
// Note: In production, we would get actual CPU/memory metrics
|
||||
totalLoad += status.activeStreams;
|
||||
}
|
||||
// Calculate averages
|
||||
for (const region of this.config.regions) {
|
||||
const metric = regionMetrics[region];
|
||||
if (metric.agentCount > 0) {
|
||||
metric.avgQueryLatency /= metric.agentCount;
|
||||
// Placeholder for actual CPU/memory aggregation
|
||||
metric.avgCpuUsage = Math.random() * 100;
|
||||
metric.avgMemoryUsage = Math.random() * 100;
|
||||
}
|
||||
}
|
||||
return {
|
||||
totalAgents: this.agents.size,
|
||||
activeAgents,
|
||||
totalLoad,
|
||||
averageLoad: this.agents.size > 0 ? totalLoad / this.agents.size : 0,
|
||||
regionMetrics,
|
||||
timestamp: Date.now(),
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Store data in swarm memory via claude-flow hooks
|
||||
*/
|
||||
async storeInMemory(key, value) {
|
||||
this.swarmMemory.set(key, value);
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
const serialized = JSON.stringify(value).replace(/"/g, '\\"');
|
||||
await execAsync(`npx claude-flow@alpha hooks post-edit --file "swarm-memory" --memory-key "${key}"`);
|
||||
}
|
||||
catch (error) {
|
||||
console.warn(`[SwarmManager] Error storing in memory: ${key}`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Retrieve data from swarm memory
|
||||
*/
|
||||
async retrieveFromMemory(key) {
|
||||
return this.swarmMemory.get(key);
|
||||
}
|
||||
/**
|
||||
* Remove data from swarm memory
|
||||
*/
|
||||
async removeFromMemory(key) {
|
||||
this.swarmMemory.delete(key);
|
||||
}
|
||||
/**
|
||||
* Get swarm status
|
||||
*/
|
||||
getStatus() {
|
||||
return {
|
||||
topology: this.config.topology,
|
||||
regions: this.config.regions,
|
||||
totalAgents: this.agents.size,
|
||||
metrics: this.calculateSwarmMetrics(),
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Shutdown swarm gracefully
|
||||
*/
|
||||
async shutdown() {
|
||||
console.log('[SwarmManager] Shutting down swarm...');
|
||||
// Stop timers
|
||||
if (this.healthCheckTimer) {
|
||||
clearInterval(this.healthCheckTimer);
|
||||
}
|
||||
if (this.autoScaleTimer) {
|
||||
clearInterval(this.autoScaleTimer);
|
||||
}
|
||||
// Shutdown all agents
|
||||
const shutdownPromises = Array.from(this.agents.keys()).map(agentId => this.despawnAgent(agentId));
|
||||
await Promise.all(shutdownPromises);
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(`npx claude-flow@alpha hooks post-task --task-id "swarm-shutdown"`);
|
||||
await execAsync(`npx claude-flow@alpha hooks session-end --export-metrics true`);
|
||||
}
|
||||
catch (error) {
|
||||
console.warn('[SwarmManager] Error executing shutdown hooks:', error);
|
||||
}
|
||||
}
|
||||
this.emit('swarm:shutdown');
|
||||
console.log('[SwarmManager] Swarm shutdown complete');
|
||||
}
|
||||
}
|
||||
exports.SwarmManager = SwarmManager;
|
||||
//# sourceMappingURL=swarm-manager.js.map
|
||||
1
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.js.map
vendored
Normal file
1
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
590
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.ts
vendored
Normal file
590
vendor/ruvector/npm/packages/agentic-integration/swarm-manager.ts
vendored
Normal file
@@ -0,0 +1,590 @@
|
||||
/**
|
||||
* Swarm Manager - Dynamic agent swarm management
|
||||
*
|
||||
* Handles:
|
||||
* - Dynamic agent spawning based on load
|
||||
* - Agent lifecycle management
|
||||
* - Topology management (mesh coordination)
|
||||
* - Memory/state sharing via claude-flow hooks
|
||||
*/
|
||||
|
||||
import { EventEmitter } from 'events';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { RegionalAgent, RegionalAgentConfig } from './regional-agent';
|
||||
import { AgentCoordinator, AgentRegistration } from './agent-coordinator';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
export interface SwarmConfig {
|
||||
topology: 'mesh' | 'hierarchical' | 'hybrid';
|
||||
minAgentsPerRegion: number;
|
||||
maxAgentsPerRegion: number;
|
||||
scaleUpThreshold: number; // CPU/memory threshold to trigger scale-up
|
||||
scaleDownThreshold: number; // Threshold to trigger scale-down
|
||||
scaleUpCooldown: number; // Cooldown period between scale-ups (ms)
|
||||
scaleDownCooldown: number; // Cooldown period between scale-downs (ms)
|
||||
healthCheckInterval: number;
|
||||
enableAutoScaling: boolean;
|
||||
enableClaudeFlowHooks: boolean;
|
||||
regions: string[];
|
||||
}
|
||||
|
||||
export interface SwarmMetrics {
|
||||
totalAgents: number;
|
||||
activeAgents: number;
|
||||
totalLoad: number;
|
||||
averageLoad: number;
|
||||
regionMetrics: Record<string, RegionMetrics>;
|
||||
timestamp: number;
|
||||
}
|
||||
|
||||
export interface RegionMetrics {
|
||||
region: string;
|
||||
agentCount: number;
|
||||
activeAgents: number;
|
||||
avgCpuUsage: number;
|
||||
avgMemoryUsage: number;
|
||||
totalStreams: number;
|
||||
avgQueryLatency: number;
|
||||
}
|
||||
|
||||
export class SwarmManager extends EventEmitter {
|
||||
private agents: Map<string, RegionalAgent> = new Map();
|
||||
private agentConfigs: Map<string, RegionalAgentConfig> = new Map();
|
||||
private lastScaleUp: Map<string, number> = new Map();
|
||||
private lastScaleDown: Map<string, number> = new Map();
|
||||
private healthCheckTimer?: NodeJS.Timeout;
|
||||
private autoScaleTimer?: NodeJS.Timeout;
|
||||
private swarmMemory: Map<string, any> = new Map();
|
||||
private agentCounter = 0;
|
||||
|
||||
constructor(
|
||||
private config: SwarmConfig,
|
||||
private coordinator: AgentCoordinator
|
||||
) {
|
||||
super();
|
||||
this.initialize();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize swarm manager
|
||||
*/
|
||||
private async initialize(): Promise<void> {
|
||||
console.log('[SwarmManager] Initializing swarm manager...');
|
||||
console.log(`[SwarmManager] Topology: ${this.config.topology}`);
|
||||
console.log(`[SwarmManager] Regions: ${this.config.regions.join(', ')}`);
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Initialize swarm coordination via claude-flow
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks pre-task --description "Initialize swarm manager with ${this.config.topology} topology"`
|
||||
);
|
||||
|
||||
// Initialize swarm topology
|
||||
const topologyCmd = JSON.stringify({
|
||||
topology: this.config.topology,
|
||||
maxAgents: this.config.maxAgentsPerRegion * this.config.regions.length,
|
||||
}).replace(/"/g, '\\"');
|
||||
|
||||
console.log('[SwarmManager] Initializing claude-flow swarm coordination...');
|
||||
|
||||
// Store swarm configuration in memory
|
||||
await this.storeInMemory('swarm/config', this.config);
|
||||
|
||||
console.log('[SwarmManager] Claude-flow hooks initialized');
|
||||
} catch (error) {
|
||||
console.warn('[SwarmManager] Claude-flow hooks not available:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// Spawn initial agents for each region
|
||||
await this.spawnInitialAgents();
|
||||
|
||||
// Start health monitoring
|
||||
if (this.config.healthCheckInterval > 0) {
|
||||
this.startHealthMonitoring();
|
||||
}
|
||||
|
||||
// Start auto-scaling
|
||||
if (this.config.enableAutoScaling) {
|
||||
this.startAutoScaling();
|
||||
}
|
||||
|
||||
this.emit('swarm:initialized', {
|
||||
topology: this.config.topology,
|
||||
regions: this.config.regions,
|
||||
initialAgents: this.agents.size,
|
||||
});
|
||||
|
||||
console.log(`[SwarmManager] Swarm initialized with ${this.agents.size} agents`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Spawn initial agents for each region
|
||||
*/
|
||||
private async spawnInitialAgents(): Promise<void> {
|
||||
console.log('[SwarmManager] Spawning initial agents...');
|
||||
|
||||
const spawnPromises: Promise<void>[] = [];
|
||||
|
||||
for (const region of this.config.regions) {
|
||||
for (let i = 0; i < this.config.minAgentsPerRegion; i++) {
|
||||
spawnPromises.push(this.spawnAgent(region));
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(spawnPromises);
|
||||
|
||||
console.log(`[SwarmManager] Spawned ${this.agents.size} initial agents`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Spawn a new agent in specific region
|
||||
*/
|
||||
async spawnAgent(region: string, capacity: number = 1000): Promise<string> {
|
||||
const agentId = `agent-${region}-${this.agentCounter++}`;
|
||||
|
||||
console.log(`[SwarmManager] Spawning agent ${agentId} in ${region}`);
|
||||
|
||||
const agentConfig: RegionalAgentConfig = {
|
||||
agentId,
|
||||
region,
|
||||
coordinatorEndpoint: 'coordinator.ruvector.io',
|
||||
localStoragePath: `/var/lib/ruvector/${region}/${agentId}`,
|
||||
maxConcurrentStreams: 1000,
|
||||
metricsReportInterval: 30000, // 30 seconds
|
||||
syncInterval: 5000, // 5 seconds
|
||||
enableClaudeFlowHooks: this.config.enableClaudeFlowHooks,
|
||||
vectorDimensions: 768, // Default dimension
|
||||
capabilities: ['query', 'index', 'sync'],
|
||||
};
|
||||
|
||||
// Create agent instance
|
||||
const agent = new RegionalAgent(agentConfig);
|
||||
|
||||
// Set up event handlers
|
||||
this.setupAgentEventHandlers(agent, agentConfig);
|
||||
|
||||
// Store agent
|
||||
this.agents.set(agentId, agent);
|
||||
this.agentConfigs.set(agentId, agentConfig);
|
||||
|
||||
// Register with coordinator
|
||||
const registration: AgentRegistration = {
|
||||
agentId,
|
||||
region,
|
||||
endpoint: `https://${region}.ruvector.io/agent/${agentId}`,
|
||||
capabilities: agentConfig.capabilities,
|
||||
capacity,
|
||||
registeredAt: Date.now(),
|
||||
};
|
||||
|
||||
await this.coordinator.registerAgent(registration);
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
// Notify about agent spawn
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks notify --message "Spawned agent ${agentId} in ${region}"`
|
||||
);
|
||||
|
||||
// Store agent info in swarm memory
|
||||
await this.storeInMemory(`swarm/agents/${agentId}`, {
|
||||
config: agentConfig,
|
||||
registration,
|
||||
spawnedAt: Date.now(),
|
||||
});
|
||||
} catch (error) {
|
||||
// Non-critical
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('agent:spawned', { agentId, region });
|
||||
|
||||
return agentId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up event handlers for agent
|
||||
*/
|
||||
private setupAgentEventHandlers(agent: RegionalAgent, config: RegionalAgentConfig): void {
|
||||
// Forward agent events to swarm manager
|
||||
agent.on('metrics:report', (metrics) => {
|
||||
this.coordinator.updateAgentMetrics(metrics);
|
||||
});
|
||||
|
||||
agent.on('query:completed', (data) => {
|
||||
this.emit('query:completed', { ...data, agentId: config.agentId });
|
||||
});
|
||||
|
||||
agent.on('query:failed', (data) => {
|
||||
this.emit('query:failed', { ...data, agentId: config.agentId });
|
||||
});
|
||||
|
||||
agent.on('sync:broadcast', (payload) => {
|
||||
this.handleSyncBroadcast(payload, config.region);
|
||||
});
|
||||
|
||||
agent.on('agent:shutdown', () => {
|
||||
this.handleAgentShutdown(config.agentId);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle sync broadcast from agent
|
||||
*/
|
||||
private async handleSyncBroadcast(payload: any, sourceRegion: string): Promise<void> {
|
||||
// Broadcast to all agents in other regions
|
||||
for (const [agentId, agent] of this.agents.entries()) {
|
||||
const agentConfig = this.agentConfigs.get(agentId);
|
||||
|
||||
if (agentConfig && agentConfig.region !== sourceRegion) {
|
||||
try {
|
||||
await agent.handleSyncPayload(payload);
|
||||
} catch (error) {
|
||||
console.error(`[SwarmManager] Error syncing to agent ${agentId}:`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Despawn an agent
|
||||
*/
|
||||
async despawnAgent(agentId: string): Promise<void> {
|
||||
console.log(`[SwarmManager] Despawning agent ${agentId}`);
|
||||
|
||||
const agent = this.agents.get(agentId);
|
||||
if (!agent) {
|
||||
throw new Error(`Agent ${agentId} not found`);
|
||||
}
|
||||
|
||||
// Unregister from coordinator
|
||||
await this.coordinator.unregisterAgent(agentId);
|
||||
|
||||
// Shutdown agent
|
||||
await agent.shutdown();
|
||||
|
||||
// Remove from tracking
|
||||
this.agents.delete(agentId);
|
||||
this.agentConfigs.delete(agentId);
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks notify --message "Despawned agent ${agentId}"`
|
||||
);
|
||||
|
||||
// Remove from swarm memory
|
||||
await this.removeFromMemory(`swarm/agents/${agentId}`);
|
||||
} catch (error) {
|
||||
// Non-critical
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('agent:despawned', { agentId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle agent shutdown
|
||||
*/
|
||||
private handleAgentShutdown(agentId: string): void {
|
||||
console.log(`[SwarmManager] Agent ${agentId} has shut down`);
|
||||
|
||||
this.agents.delete(agentId);
|
||||
this.agentConfigs.delete(agentId);
|
||||
|
||||
this.emit('agent:shutdown', { agentId });
|
||||
}
|
||||
|
||||
/**
|
||||
* Start health monitoring
|
||||
*/
|
||||
private startHealthMonitoring(): void {
|
||||
this.healthCheckTimer = setInterval(() => {
|
||||
this.performHealthChecks();
|
||||
}, this.config.healthCheckInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform health checks on all agents
|
||||
*/
|
||||
private async performHealthChecks(): Promise<void> {
|
||||
const unhealthyAgents: string[] = [];
|
||||
|
||||
for (const [agentId, agent] of this.agents.entries()) {
|
||||
const status = agent.getStatus();
|
||||
|
||||
if (!status.healthy) {
|
||||
unhealthyAgents.push(agentId);
|
||||
console.warn(`[SwarmManager] Agent ${agentId} is unhealthy`);
|
||||
}
|
||||
}
|
||||
|
||||
if (unhealthyAgents.length > 0) {
|
||||
this.emit('health:check', {
|
||||
unhealthyAgents,
|
||||
totalAgents: this.agents.size,
|
||||
});
|
||||
}
|
||||
|
||||
// Could implement auto-recovery here
|
||||
// for (const agentId of unhealthyAgents) {
|
||||
// await this.recoverAgent(agentId);
|
||||
// }
|
||||
}
|
||||
|
||||
/**
|
||||
* Start auto-scaling
|
||||
*/
|
||||
private startAutoScaling(): void {
|
||||
this.autoScaleTimer = setInterval(() => {
|
||||
this.evaluateScaling();
|
||||
}, 10000); // Evaluate every 10 seconds
|
||||
}
|
||||
|
||||
/**
|
||||
* Evaluate if scaling is needed
|
||||
*/
|
||||
private async evaluateScaling(): Promise<void> {
|
||||
const metrics = this.calculateSwarmMetrics();
|
||||
|
||||
for (const [region, regionMetrics] of Object.entries(metrics.regionMetrics)) {
|
||||
const avgLoad = (regionMetrics.avgCpuUsage + regionMetrics.avgMemoryUsage) / 2;
|
||||
|
||||
// Check scale-up condition
|
||||
if (
|
||||
avgLoad > this.config.scaleUpThreshold &&
|
||||
regionMetrics.agentCount < this.config.maxAgentsPerRegion &&
|
||||
this.canScaleUp(region)
|
||||
) {
|
||||
console.log(`[SwarmManager] Scaling up in region ${region} (load: ${avgLoad.toFixed(1)}%)`);
|
||||
await this.scaleUp(region);
|
||||
}
|
||||
|
||||
// Check scale-down condition
|
||||
if (
|
||||
avgLoad < this.config.scaleDownThreshold &&
|
||||
regionMetrics.agentCount > this.config.minAgentsPerRegion &&
|
||||
this.canScaleDown(region)
|
||||
) {
|
||||
console.log(`[SwarmManager] Scaling down in region ${region} (load: ${avgLoad.toFixed(1)}%)`);
|
||||
await this.scaleDown(region);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if can scale up (respects cooldown)
|
||||
*/
|
||||
private canScaleUp(region: string): boolean {
|
||||
const lastScaleUp = this.lastScaleUp.get(region) || 0;
|
||||
return Date.now() - lastScaleUp > this.config.scaleUpCooldown;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if can scale down (respects cooldown)
|
||||
*/
|
||||
private canScaleDown(region: string): boolean {
|
||||
const lastScaleDown = this.lastScaleDown.get(region) || 0;
|
||||
return Date.now() - lastScaleDown > this.config.scaleDownCooldown;
|
||||
}
|
||||
|
||||
/**
|
||||
* Scale up agents in region
|
||||
*/
|
||||
private async scaleUp(region: string): Promise<void> {
|
||||
try {
|
||||
await this.spawnAgent(region);
|
||||
this.lastScaleUp.set(region, Date.now());
|
||||
|
||||
this.emit('swarm:scale-up', { region, totalAgents: this.agents.size });
|
||||
} catch (error) {
|
||||
console.error(`[SwarmManager] Error scaling up in ${region}:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Scale down agents in region
|
||||
*/
|
||||
private async scaleDown(region: string): Promise<void> {
|
||||
// Find agent with lowest load in region
|
||||
const regionAgents = Array.from(this.agents.entries())
|
||||
.filter(([_, agent]) => {
|
||||
const config = this.agentConfigs.get(agent.getStatus().agentId);
|
||||
return config?.region === region;
|
||||
})
|
||||
.map(([agentId, agent]) => ({
|
||||
agentId,
|
||||
status: agent.getStatus(),
|
||||
}))
|
||||
.sort((a, b) => a.status.activeStreams - b.status.activeStreams);
|
||||
|
||||
if (regionAgents.length > 0) {
|
||||
const agentToDespawn = regionAgents[0];
|
||||
|
||||
try {
|
||||
await this.despawnAgent(agentToDespawn.agentId);
|
||||
this.lastScaleDown.set(region, Date.now());
|
||||
|
||||
this.emit('swarm:scale-down', { region, totalAgents: this.agents.size });
|
||||
} catch (error) {
|
||||
console.error(`[SwarmManager] Error scaling down in ${region}:`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Aggregate per-agent status into swarm-wide and per-region metrics.
 *
 * Query latency is averaged over the agents in each region; "load" is
 * currently the total number of active streams. CPU/memory figures are
 * random placeholders until real host metrics are wired in.
 *
 * @returns Snapshot of swarm metrics, timestamped at computation time.
 */
calculateSwarmMetrics(): SwarmMetrics {
  const regionMetrics: Record<string, RegionMetrics> = {};
  let totalLoad = 0;
  let activeAgents = 0;

  // Create (or return) the metrics bucket for a region. Buckets are created
  // lazily so an agent registered in a region outside config.regions cannot
  // cause an undefined-index failure during aggregation.
  const ensureRegion = (region: string): RegionMetrics => {
    if (!regionMetrics[region]) {
      regionMetrics[region] = {
        region,
        agentCount: 0,
        activeAgents: 0,
        avgCpuUsage: 0,
        avgMemoryUsage: 0,
        totalStreams: 0,
        avgQueryLatency: 0,
      };
    }
    return regionMetrics[region];
  };

  // Every configured region gets a bucket, even with zero agents.
  for (const region of this.config.regions) {
    ensureRegion(region);
  }

  // Aggregate metrics
  for (const [agentId, agent] of this.agents.entries()) {
    const config = this.agentConfigs.get(agentId);
    if (!config) continue;

    const status = agent.getStatus();
    const regionMetric = ensureRegion(config.region);
    regionMetric.agentCount++;

    if (status.healthy) {
      activeAgents++;
      regionMetric.activeAgents++;
    }

    regionMetric.totalStreams += status.activeStreams;
    // Accumulate latency here; converted to an average below.
    regionMetric.avgQueryLatency += status.avgQueryLatency;

    // Note: In production, we would get actual CPU/memory metrics
    totalLoad += status.activeStreams;
  }

  // Calculate averages over every bucket, including lazily-created ones.
  for (const metric of Object.values(regionMetrics)) {
    if (metric.agentCount > 0) {
      metric.avgQueryLatency /= metric.agentCount;
      // Placeholder for actual CPU/memory aggregation
      metric.avgCpuUsage = Math.random() * 100;
      metric.avgMemoryUsage = Math.random() * 100;
    }
  }

  return {
    totalAgents: this.agents.size,
    activeAgents,
    totalLoad,
    averageLoad: this.agents.size > 0 ? totalLoad / this.agents.size : 0,
    regionMetrics,
    timestamp: Date.now(),
  };
}
|
||||
|
||||
/**
|
||||
* Store data in swarm memory via claude-flow hooks
|
||||
*/
|
||||
private async storeInMemory(key: string, value: any): Promise<void> {
|
||||
this.swarmMemory.set(key, value);
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
const serialized = JSON.stringify(value).replace(/"/g, '\\"');
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks post-edit --file "swarm-memory" --memory-key "${key}"`
|
||||
);
|
||||
} catch (error) {
|
||||
console.warn(`[SwarmManager] Error storing in memory: ${key}`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve data from swarm memory
|
||||
*/
|
||||
private async retrieveFromMemory(key: string): Promise<any> {
|
||||
return this.swarmMemory.get(key);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove data from swarm memory
|
||||
*/
|
||||
private async removeFromMemory(key: string): Promise<void> {
|
||||
this.swarmMemory.delete(key);
|
||||
}
|
||||
|
||||
/**
 * Snapshot the swarm's current shape: topology, configured regions,
 * agent count, and freshly computed metrics.
 */
getStatus(): {
  topology: string;
  regions: string[];
  totalAgents: number;
  metrics: SwarmMetrics;
} {
  const { topology, regions } = this.config;

  return {
    topology,
    regions,
    totalAgents: this.agents.size,
    metrics: this.calculateSwarmMetrics(),
  };
}
|
||||
|
||||
/**
|
||||
* Shutdown swarm gracefully
|
||||
*/
|
||||
async shutdown(): Promise<void> {
|
||||
console.log('[SwarmManager] Shutting down swarm...');
|
||||
|
||||
// Stop timers
|
||||
if (this.healthCheckTimer) {
|
||||
clearInterval(this.healthCheckTimer);
|
||||
}
|
||||
if (this.autoScaleTimer) {
|
||||
clearInterval(this.autoScaleTimer);
|
||||
}
|
||||
|
||||
// Shutdown all agents
|
||||
const shutdownPromises = Array.from(this.agents.keys()).map(agentId =>
|
||||
this.despawnAgent(agentId)
|
||||
);
|
||||
|
||||
await Promise.all(shutdownPromises);
|
||||
|
||||
if (this.config.enableClaudeFlowHooks) {
|
||||
try {
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks post-task --task-id "swarm-shutdown"`
|
||||
);
|
||||
await execAsync(
|
||||
`npx claude-flow@alpha hooks session-end --export-metrics true`
|
||||
);
|
||||
} catch (error) {
|
||||
console.warn('[SwarmManager] Error executing shutdown hooks:', error);
|
||||
}
|
||||
}
|
||||
|
||||
this.emit('swarm:shutdown');
|
||||
|
||||
console.log('[SwarmManager] Swarm shutdown complete');
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user