Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,657 @@
# Multi-Agent Swarm Coordination Examples
Comprehensive examples demonstrating synthetic data generation for multi-agent systems, distributed processing, collective intelligence, and agent lifecycle management using agentic-synth.
## Overview
This directory contains production-ready examples for generating realistic test data for:
- **Agent Coordination**: Communication patterns, task distribution, consensus building, load balancing, and fault tolerance
- **Distributed Processing**: Map-reduce jobs, worker pools, message queues, event-driven architectures, and saga transactions
- **Collective Intelligence**: Collaborative problem-solving, knowledge sharing, emergent behaviors, voting systems, and reputation tracking
- **Agent Lifecycle**: Spawning/termination, state synchronization, health checks, recovery patterns, and version migrations
## Quick Start
### Installation
```bash
# Install agentic-synth
npm install @ruvector/agentic-synth
# Optional: Install integration packages
npm install claude-flow@alpha # For swarm orchestration
npm install ruv-swarm # For enhanced coordination
npm install flow-nexus@latest # For cloud features
```
### Basic Usage
```typescript
import { createSynth } from '@ruvector/agentic-synth';
const synth = createSynth({
provider: 'gemini',
apiKey: process.env.GEMINI_API_KEY,
cacheStrategy: 'memory',
});
// Generate agent communication data
const messages = await synth.generateEvents({
count: 500,
eventTypes: ['direct_message', 'broadcast', 'request_reply'],
schema: {
sender_agent_id: 'agent-{1-20}',
receiver_agent_id: 'agent-{1-20}',
payload: { action: 'string', data: 'object' },
timestamp: 'ISO timestamp',
},
});
```
## Examples
### 1. Agent Coordination (`agent-coordination.ts`)
Generate data for multi-agent communication and coordination patterns.
**Examples:**
- Agent communication patterns (direct messages, broadcasts, pub/sub)
- Task distribution scenarios with load balancing
- Consensus building (Raft, Paxos, Byzantine)
- Load balancing metrics and patterns
- Fault tolerance and failure recovery
- Hierarchical swarm coordination
**Run:**
```bash
npx tsx examples/swarms/agent-coordination.ts
```
**Integration with claude-flow:**
```bash
# Initialize swarm coordination
npx claude-flow@alpha hooks pre-task --description "Agent coordination"
npx claude-flow@alpha hooks notify --message "Coordination data generated"
npx claude-flow@alpha hooks post-edit --memory-key "swarm/coordinator/messages"
```
**Key Features:**
- 500+ communication events with realistic latency
- 300 task distribution scenarios with dependencies
- 50 consensus rounds with multiple protocols
- 100 agent metrics for load balancing analysis
- 100 failure scenarios with recovery actions
- Hierarchical topology with sub-coordinators
### 2. Distributed Processing (`distributed-processing.ts`)
Generate data for distributed computation and processing systems.
**Examples:**
- Map-reduce job execution data
- Worker pool simulation and metrics
- Message queue scenarios (RabbitMQ, SQS)
- Event-driven architecture (Kafka, EventBridge)
- Saga pattern distributed transactions
- Stream processing pipelines (Kafka Streams, Flink)
**Run:**
```bash
npx tsx examples/swarms/distributed-processing.ts
```
**Integration with Message Queues:**
```typescript
// RabbitMQ
const channel = await connection.createChannel();
await channel.assertQueue('tasks', { durable: true });
channel.sendToQueue('tasks', Buffer.from(JSON.stringify(taskData)));
// Kafka
await producer.send({
topic: 'events',
messages: generatedEvents.map(e => ({ value: JSON.stringify(e) })),
});
```
**Key Features:**
- 20 map-reduce jobs with mapper/reducer task tracking
- 200 worker pool states with utilization metrics
- 1,000 message queue messages with priority handling
- 2,000 event-driven architecture events
- 100 saga pattern transactions with compensation
- Stream processing with windowed aggregations
### 3. Collective Intelligence (`collective-intelligence.ts`)
Generate data for swarm intelligence and collaborative systems.
**Examples:**
- Collaborative problem-solving sessions
- Knowledge sharing and transfer patterns
- Emergent behavior simulation
- Voting and consensus mechanisms
- Reputation and trust systems
**Run:**
```bash
npx tsx examples/swarms/collective-intelligence.ts
```
**Integration with AgenticDB:**
```typescript
import AgenticDB from 'agenticdb';
const db = new AgenticDB();
// Store knowledge embeddings
await db.storeVector({
text: knowledge.content,
metadata: { category: knowledge.category, rating: knowledge.quality_rating },
});
// Semantic search for similar knowledge
const results = await db.search({ query: 'distributed consensus', topK: 10 });
```
**Integration with Neural Training:**
```bash
# Train patterns from successful collaborations
npx claude-flow@alpha hooks neural-train --pattern "collaboration"
npx claude-flow@alpha hooks session-end --export-metrics true
```
**Key Features:**
- 30 collaborative problem-solving sessions
- 200 knowledge base entries with quality ratings
- 1,000 agent interactions showing emergent patterns
- 50 voting sessions with multiple voting methods
- 100 reputation profiles with trust relationships
### 4. Agent Lifecycle (`agent-lifecycle.ts`)
Generate data for agent lifecycle management and orchestration.
**Examples:**
- Agent spawning and termination events
- State synchronization across distributed agents
- Health check scenarios and monitoring
- Recovery patterns and failure handling
- Version migration and deployment strategies
**Run:**
```bash
npx tsx examples/swarms/agent-lifecycle.ts
```
**Integration with Kubernetes:**
```yaml
# deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agent-swarm
spec:
  replicas: 10
  template:
    spec:
      containers:
        - name: agent
          image: agent:v2.0
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
```
**Integration with claude-flow:**
```bash
# Spawn agents dynamically
npx claude-flow@alpha hooks pre-task --spawn-agents 5
npx claude-flow@alpha mcp start
# Use MCP tools for lifecycle management
# - agent_spawn: Create new agents
# - swarm_status: Monitor agent health
# - agent_metrics: Track performance
```
**Key Features:**
- 500 lifecycle events (spawn, ready, terminate, failed)
- 500 state snapshots with synchronization events
- 1,000 health checks with auto-healing actions
- 100 failure scenarios with recovery strategies
- 50 version migrations with canary deployments
## Integration Guides
### Claude-Flow Integration
Claude-Flow provides swarm orchestration and neural pattern learning.
**Setup:**
```bash
npm install claude-flow@alpha
npx claude-flow@alpha mcp start
```
**Usage:**
```bash
# Initialize swarm with topology
npx claude-flow@alpha hooks pre-task --description "Initialize mesh swarm"
# Store coordination data in memory
npx claude-flow@alpha hooks post-edit \
--file "coordination.json" \
--memory-key "swarm/coordinator/state"
# Train neural patterns from successful runs
npx claude-flow@alpha hooks neural-train --pattern "distributed-consensus"
# Export session metrics
npx claude-flow@alpha hooks session-end --export-metrics true
```
**MCP Tools:**
- `swarm_init`: Initialize swarm topology
- `agent_spawn`: Spawn new agents
- `task_orchestrate`: Orchestrate distributed tasks
- `swarm_status`: Monitor swarm health
- `neural_patterns`: Analyze learned patterns
- `memory_usage`: Track coordination memory
### Ruv-Swarm Integration
Ruv-Swarm provides enhanced multi-agent coordination.
**Setup:**
```bash
npm install ruv-swarm
npx ruv-swarm mcp start
```
**Usage:**
```typescript
// Access via MCP tools
// - swarm_init: Initialize coordination patterns
// - agent_metrics: Real-time agent performance
// - neural_status: Neural pattern analysis
```
### Flow-Nexus Cloud Integration
Flow-Nexus provides cloud-based agent management and sandboxed execution.
**Setup:**
```bash
npm install flow-nexus@latest
npx flow-nexus@latest register
npx flow-nexus@latest login
```
**MCP Tools (70+ available):**
```bash
# Create cloud sandbox for agent execution
# mcp__flow-nexus__sandbox_create
# Deploy swarm to cloud
# mcp__flow-nexus__swarm_init
# Scale agents dynamically
# mcp__flow-nexus__swarm_scale
# Real-time monitoring
# mcp__flow-nexus__execution_stream_subscribe
```
### Message Queue Integration
#### RabbitMQ
```typescript
import amqp from 'amqplib';
const connection = await amqp.connect('amqp://localhost');
const channel = await connection.createChannel();
await channel.assertQueue('tasks', { durable: true });
// Publish generated task data
for (const task of generatedTasks.data) {
channel.sendToQueue('tasks', Buffer.from(JSON.stringify(task)), {
persistent: true,
priority: task.priority,
});
}
```
#### Apache Kafka
```typescript
import { Kafka } from 'kafkajs';
const kafka = new Kafka({ clientId: 'agentic-synth', brokers: ['localhost:9092'] });
const producer = kafka.producer();
await producer.connect();
await producer.send({
topic: 'agent-events',
messages: generatedEvents.data.map(event => ({
key: event.agent_id,
value: JSON.stringify(event),
partition: hash(event.partition_key) % partitionCount,
})),
});
```
### Database Integration
#### AgenticDB (Vector Database)
```typescript
import AgenticDB from 'agenticdb';
const db = new AgenticDB({ persist: true, path: './agent-knowledge' });
// Store agent knowledge with embeddings
for (const entry of knowledgeBase.data) {
await db.storeVector({
text: entry.content,
metadata: {
category: entry.category,
author: entry.author_agent_id,
rating: entry.quality_rating,
tags: entry.tags,
},
});
}
// Semantic search
const results = await db.search({
query: 'consensus algorithm implementation',
topK: 10,
filter: { category: 'best_practice' },
});
```
#### Redis (State Synchronization)
```typescript
import Redis from 'ioredis';
const redis = new Redis();
// Store agent state
await redis.set(
`agent:${agentId}:state`,
JSON.stringify(stateSnapshot),
'EX',
3600 // TTL in seconds
);
// Get agent state
const state = await redis.get(`agent:${agentId}:state`);
```
## Use Cases
### 1. Testing Distributed Systems
Generate realistic test data for distributed agent systems:
```typescript
import { createSynth } from '@ruvector/agentic-synth';
const synth = createSynth();
// Generate test data for 100 agents coordinating
const testData = await synth.generateStructured({
count: 100,
schema: {
agent_id: 'agent-{1-100}',
tasks_assigned: 'number (0-50)',
messages_sent: 'number (0-200)',
coordination_events: ['array of coordination events'],
},
});
// Use in integration tests
describe('Agent Swarm Coordination', () => {
it('should handle 100 concurrent agents', async () => {
const swarm = new AgentSwarm();
await swarm.initialize(testData);
expect(swarm.activeAgents).toBe(100);
});
});
```
### 2. Load Testing and Benchmarking
Generate high-volume data for performance testing:
```typescript
// Generate 10,000 concurrent events
const loadTestData = await synth.generateEvents({
count: 10000,
eventTypes: ['task_request', 'task_complete', 'heartbeat'],
distribution: 'poisson',
timeRange: { start: new Date(), end: new Date(Date.now() + 3600000) },
});
// Replay events in load test
for (const event of loadTestData.data) {
await testHarness.sendEvent(event);
}
```
### 3. Machine Learning Training
Generate training data for ML models:
```typescript
// Generate agent behavior data for ML training
const trainingData = await synth.generateStructured({
count: 5000,
schema: {
// Features
agent_load: 'number (0-100)',
queue_depth: 'number (0-1000)',
error_rate: 'number (0-100)',
response_time_ms: 'number (10-5000)',
// Label
health_score: 'number (0-100, based on features)',
},
});
// Train predictive model
const features = trainingData.data.map(d => [
d.agent_load,
d.queue_depth,
d.error_rate,
d.response_time_ms,
]);
const labels = trainingData.data.map(d => d.health_score);
await model.fit(features, labels);
```
### 4. Monitoring and Alerting
Generate test data for monitoring systems:
```typescript
// Generate various failure scenarios
const monitoringData = await synth.generateEvents({
count: 200,
eventTypes: ['agent_crash', 'high_latency', 'resource_exhaustion'],
schema: {
severity: 'critical | warning | info',
affected_agents: ['array of agent ids'],
metrics: { cpu: 'number', memory: 'number', latency: 'number' },
},
});
// Test alerting rules
for (const event of monitoringData.data) {
if (event.severity === 'critical') {
expect(alertingSystem.shouldAlert(event)).toBe(true);
}
}
```
## Performance Considerations
### Caching
Enable caching to speed up repeated data generation:
```typescript
const synth = createSynth({
cacheStrategy: 'memory', // or 'disk'
cacheTTL: 3600, // 1 hour
});
// First call - generates data
const data1 = await synth.generate('structured', options);
// Second call - returns cached result (much faster)
const data2 = await synth.generate('structured', options);
```
### Batch Generation
Generate multiple datasets in parallel:
```typescript
const batches = [
{ count: 100, schema: agentCoordinationSchema },
{ count: 200, schema: taskDistributionSchema },
{ count: 150, schema: healthCheckSchema },
];
const results = await synth.generateBatch('structured', batches, 3); // 3 concurrent
```
### Streaming
Generate large datasets with streaming:
```typescript
for await (const agent of synth.generateStream('structured', {
count: 10000,
schema: agentSchema,
})) {
await processAgent(agent);
// Process one at a time to avoid memory issues
}
```
## Best Practices
1. **Schema Design**: Create reusable schemas for consistency
2. **Constraints**: Use constraints to ensure data validity
3. **Caching**: Enable caching for development/testing
4. **Error Handling**: Always handle generation errors gracefully
5. **Validation**: Validate generated data before use
6. **Integration**: Use hooks for seamless integration with coordination frameworks
## Troubleshooting
### Common Issues
**Issue: API rate limits**
```typescript
// Solution: Enable caching and batch requests
const synth = createSynth({
cacheStrategy: 'memory',
maxRetries: 3,
timeout: 30000,
});
```
**Issue: Memory usage with large datasets**
```typescript
// Solution: Use streaming instead of batch generation
for await (const item of synth.generateStream('structured', options)) {
await processItem(item);
}
```
**Issue: Inconsistent data across runs**
```typescript
// Solution: Use constraints for consistency
const result = await synth.generateStructured({
count: 100,
schema: {...},
constraints: [
'IDs should be unique',
'Timestamps should be in chronological order',
'References should point to valid entities',
],
});
```
## API Reference
### Main Functions
- `createSynth(config)`: Create agentic-synth instance
- `generateStructured(options)`: Generate structured data
- `generateEvents(options)`: Generate event streams
- `generateTimeSeries(options)`: Generate time-series data
- `generateStream(type, options)`: Stream generation
- `generateBatch(type, batches, concurrency)`: Batch generation
### Configuration Options
```typescript
interface SynthConfig {
  provider: 'gemini' | 'openrouter';
  apiKey?: string;
  model?: string;
  cacheStrategy?: 'memory' | 'disk' | 'none';
  cacheTTL?: number; // seconds
  maxRetries?: number;
  timeout?: number; // milliseconds
  streaming?: boolean;
}
```
## Contributing
Contributions are welcome! Please submit examples that demonstrate:
- Real-world multi-agent patterns
- Integration with popular frameworks
- Performance optimizations
- Novel coordination strategies
## Resources
- [agentic-synth Documentation](https://github.com/ruvnet/ruvector/tree/main/packages/agentic-synth)
- [claude-flow Documentation](https://github.com/ruvnet/claude-flow)
- [AgenticDB Documentation](https://github.com/ruvnet/ruvector)
- [Flow-Nexus Platform](https://flow-nexus.ruv.io)
## License
MIT License - See LICENSE file for details
## Support
- GitHub Issues: https://github.com/ruvnet/ruvector/issues
- Discussions: https://github.com/ruvnet/ruvector/discussions
- Discord: [Join our community](#)
---
**Note**: All examples use environment variables for API keys. Set `GEMINI_API_KEY` or `OPENROUTER_API_KEY` before running examples.

View File

@@ -0,0 +1,47 @@
/**
 * Multi-Agent System Coordination Examples
 *
 * Demonstrates agent communication patterns, task distribution,
 * consensus building, load balancing, and fault tolerance scenarios
 * for distributed agent systems.
 *
 * Integrates with:
 * - claude-flow: Swarm initialization and orchestration
 * - ruv-swarm: Enhanced coordination patterns
 * - flow-nexus: Cloud-based agent management
 */
// NOTE(review): generated declaration file (paired with agent-coordination.d.ts.map);
// keep in sync with agent-coordination.ts rather than editing by hand.
// Every generator resolves to GenerationResult<unknown> from the built package output.
/**
 * Generate communication patterns for multi-agent systems
 */
export declare function agentCommunicationPatterns(): Promise<import("../../dist/index.js").GenerationResult<unknown>>;
/**
 * Generate task distribution data for load balancing
 */
export declare function taskDistributionScenarios(): Promise<import("../../dist/index.js").GenerationResult<unknown>>;
/**
 * Generate consensus protocol data for distributed decision making
 */
export declare function consensusBuildingData(): Promise<import("../../dist/index.js").GenerationResult<unknown>>;
/**
 * Generate load balancing metrics and patterns
 */
export declare function loadBalancingPatterns(): Promise<{
metrics: import("../../dist/index.js").GenerationResult<unknown>;
agentMetrics: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate fault tolerance and failure recovery scenarios
 */
export declare function faultToleranceScenarios(): Promise<{
failures: import("../../dist/index.js").GenerationResult<unknown>;
recoveryActions: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate hierarchical swarm coordination data
 */
export declare function hierarchicalCoordination(): Promise<{
topology: import("../../dist/index.js").GenerationResult<unknown>;
events: import("../../dist/index.js").GenerationResult<unknown>;
}>;
// Convenience runner that executes all of the above examples in sequence.
export declare function runAllCoordinationExamples(): Promise<void>;
//# sourceMappingURL=agent-coordination.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"agent-coordination.d.ts","sourceRoot":"","sources":["agent-coordination.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AASH;;GAEG;AACH,wBAAsB,0BAA0B,qEAqD/C;AAMD;;GAEG;AACH,wBAAsB,yBAAyB,qEAqD9C;AAMD;;GAEG;AACH,wBAAsB,qBAAqB,qEAkE1C;AAMD;;GAEG;AACH,wBAAsB,qBAAqB;;;GA4E1C;AAMD;;GAEG;AACH,wBAAsB,uBAAuB;;;GAoF5C;AAMD;;GAEG;AACH,wBAAsB,wBAAwB;;;GA8E7C;AAMD,wBAAsB,0BAA0B,kBA4B/C"}

View File

@@ -0,0 +1,456 @@
"use strict";
/**
* Multi-Agent System Coordination Examples
*
* Demonstrates agent communication patterns, task distribution,
* consensus building, load balancing, and fault tolerance scenarios
* for distributed agent systems.
*
* Integrates with:
* - claude-flow: Swarm initialization and orchestration
* - ruv-swarm: Enhanced coordination patterns
* - flow-nexus: Cloud-based agent management
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.agentCommunicationPatterns = agentCommunicationPatterns;
exports.taskDistributionScenarios = taskDistributionScenarios;
exports.consensusBuildingData = consensusBuildingData;
exports.loadBalancingPatterns = loadBalancingPatterns;
exports.faultToleranceScenarios = faultToleranceScenarios;
exports.hierarchicalCoordination = hierarchicalCoordination;
exports.runAllCoordinationExamples = runAllCoordinationExamples;
const index_js_1 = require("../../dist/index.js");
// ============================================================================
// Example 1: Agent Communication Patterns
// ============================================================================
/**
 * Generate communication patterns for multi-agent systems.
 *
 * Synthesizes one hour of message-passing traffic between 20 agents,
 * prints summary counts, and returns the generated event set.
 */
async function agentCommunicationPatterns() {
    console.log('\n🤖 Example 1: Agent Communication Patterns\n');
    // Synth client; the placeholder key lets the example load without credentials.
    const generator = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
        cacheStrategy: 'memory',
    });
    // Generate message passing data
    const communicationEvents = await generator.generateEvents({
        count: 500,
        eventTypes: [
            'direct_message',
            'broadcast',
            'multicast',
            'request_reply',
            'publish_subscribe',
        ],
        schema: {
            message_id: 'UUID',
            sender_agent_id: 'agent-{1-20}',
            receiver_agent_id: 'agent-{1-20} or "broadcast"',
            message_type: 'one of eventTypes',
            payload: {
                action: 'task_request | status_update | data_sync | error_report | ack',
                data: 'JSON object with task details',
                priority: 'high | medium | low',
                requires_response: 'boolean',
            },
            timestamp: 'ISO timestamp',
            latency_ms: 'number (1-500)',
            success: 'boolean (95% true)',
        },
        distribution: 'poisson',
        timeRange: {
            start: new Date(Date.now() - 3600000), // Last hour
            end: new Date(),
        },
    });
    // Tally message-type counts and total latency in a single pass.
    let directCount = 0;
    let broadcastCount = 0;
    let latencySum = 0;
    for (const msg of communicationEvents.data) {
        if (msg.message_type === 'direct_message') {
            directCount += 1;
        }
        if (msg.message_type === 'broadcast') {
            broadcastCount += 1;
        }
        latencySum += msg.latency_ms;
    }
    console.log('Communication Patterns Generated:');
    console.log(`- Total messages: ${communicationEvents.data.length}`);
    console.log(`- Direct messages: ${directCount}`);
    console.log(`- Broadcasts: ${broadcastCount}`);
    console.log(`- Average latency: ${(latencySum / communicationEvents.data.length).toFixed(2)}ms`);
    // Integration with claude-flow hooks
    console.log('\nClaude-Flow Integration:');
    console.log('npx claude-flow@alpha hooks notify --message "Communication patterns generated"');
    console.log('npx claude-flow@alpha hooks post-edit --file "messages.json" --memory-key "swarm/coordinator/messages"');
    return communicationEvents;
}
// ============================================================================
// Example 2: Task Distribution Scenarios
// ============================================================================
/**
 * Generate task distribution data for load balancing.
 *
 * Produces 300 synthetic tasks spread over 15 agents (dependencies
 * constrained to a DAG), prints distribution statistics, and returns
 * the generated task set.
 */
async function taskDistributionScenarios() {
    console.log('\n📋 Example 2: Task Distribution Scenarios\n');
    const generator = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate task distribution events
    const taskBatch = await generator.generateStructured({
        count: 300,
        schema: {
            task_id: 'UUID',
            task_type: 'compute | data_processing | io_operation | ml_inference | api_call',
            assigned_agent: 'agent-{1-15}',
            estimated_duration_ms: 'number (100-10000)',
            actual_duration_ms: 'number (estimated_duration_ms * 0.8-1.2)',
            cpu_usage: 'number (0-100)',
            memory_mb: 'number (10-1000)',
            priority: 'number (1-10)',
            dependencies: ['array of 0-3 task_ids or empty'],
            status: 'pending | running | completed | failed',
            start_time: 'ISO timestamp',
            end_time: 'ISO timestamp or null',
            retry_count: 'number (0-3)',
        },
        constraints: [
            'Tasks should be distributed evenly across agents',
            'High priority tasks (8-10) should complete faster',
            '5% of tasks should have failed status',
            'Dependencies should form valid DAG (no cycles)',
        ],
    });
    // Per-agent task counts plus status tallies, computed in one pass.
    const agentLoad = new Map();
    let failedCount = 0;
    let completedCount = 0;
    for (const task of taskBatch.data) {
        agentLoad.set(task.assigned_agent, (agentLoad.get(task.assigned_agent) ?? 0) + 1);
        if (task.status === 'failed') {
            failedCount += 1;
        }
        else if (task.status === 'completed') {
            completedCount += 1;
        }
    }
    console.log('Task Distribution Analysis:');
    console.log(`- Total tasks: ${taskBatch.data.length}`);
    console.log(`- Agents utilized: ${agentLoad.size}`);
    console.log(`- Tasks per agent (avg): ${(taskBatch.data.length / agentLoad.size).toFixed(1)}`);
    console.log(`- Failed tasks: ${failedCount}`);
    console.log(`- Completed tasks: ${completedCount}`);
    // Ruv-Swarm coordination pattern
    console.log('\nRuv-Swarm Coordination:');
    console.log('npx ruv-swarm mcp start');
    console.log('// Use MCP tools: swarm_init, task_orchestrate, agent_metrics');
    return taskBatch;
}
// ============================================================================
// Example 3: Consensus Building Data
// ============================================================================
/**
 * Generate consensus protocol data for distributed decision making.
 *
 * Produces 50 consensus rounds across raft/paxos/byzantine/quorum
 * protocols, prints success/duration/quorum statistics and the
 * per-protocol mix, and returns the generated rounds.
 */
async function consensusBuildingData() {
    console.log('\n🤝 Example 3: Consensus Building Scenarios\n');
    const generator = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate consensus rounds
    const consensusRounds = await generator.generateStructured({
        count: 50,
        schema: {
            round_id: 'UUID',
            proposal_id: 'UUID',
            protocol: 'raft | paxos | byzantine | quorum',
            participants: ['array of 5-15 agent ids'],
            proposer: 'agent id from participants',
            proposal: {
                type: 'leader_election | config_change | state_update | task_assignment',
                value: 'JSON object with proposal details',
            },
            votes: [
                {
                    agent_id: 'agent id from participants',
                    vote: 'accept | reject | abstain',
                    timestamp: 'ISO timestamp',
                    reasoning: 'short explanation',
                },
            ],
            status: 'proposing | voting | committed | rejected | timeout',
            quorum_required: 'number (majority of participants)',
            quorum_reached: 'boolean',
            decision: 'accepted | rejected | timeout',
            round_duration_ms: 'number (100-5000)',
            timestamp: 'ISO timestamp',
        },
        constraints: [
            'Votes array should have one entry per participant',
            '70% of rounds should reach quorum',
            '80% of committed rounds should be accepted',
            'Byzantine protocol should have 3f+1 participants (f failures)',
        ],
    });
    // Single-pass aggregation: acceptance, quorum, duration, and protocol mix.
    let acceptedCount = 0;
    let quorumCount = 0;
    let durationSum = 0;
    const protocolCount = new Map();
    for (const round of consensusRounds.data) {
        if (round.decision === 'accepted') {
            acceptedCount += 1;
        }
        if (round.quorum_reached) {
            quorumCount += 1;
        }
        durationSum += round.round_duration_ms;
        protocolCount.set(round.protocol, (protocolCount.get(round.protocol) ?? 0) + 1);
    }
    const successRate = acceptedCount / consensusRounds.data.length;
    console.log('Consensus Analysis:');
    console.log(`- Total rounds: ${consensusRounds.data.length}`);
    console.log(`- Success rate: ${(successRate * 100).toFixed(1)}%`);
    console.log(`- Average duration: ${(durationSum / consensusRounds.data.length).toFixed(0)}ms`);
    console.log(`- Quorum reached: ${quorumCount} rounds`);
    // Protocol distribution
    console.log('\nProtocol Usage:');
    for (const [protocol, count] of protocolCount) {
        console.log(`- ${protocol}: ${count} rounds`);
    }
    return consensusRounds;
}
// ============================================================================
// Example 4: Load Balancing Patterns
// ============================================================================
/**
 * Generate load balancing metrics and patterns.
 *
 * Produces a cluster-wide time series (30s sampling) plus 100
 * per-agent snapshots, prints a load-balance analysis including
 * request variance, and returns both datasets.
 */
async function loadBalancingPatterns() {
    console.log('\n⚖ Example 4: Load Balancing Patterns\n');
    const generator = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate time-series load balancing metrics
    const metrics = await generator.generateTimeSeries({
        count: 200,
        interval: '30s',
        metrics: [
            'agent_count',
            'total_requests',
            'avg_response_time_ms',
            'cpu_utilization',
            'memory_utilization',
            'queue_depth',
        ],
        trend: 'mixed',
        seasonality: true,
    });
    // Generate agent-specific metrics
    const agentMetrics = await generator.generateStructured({
        count: 100,
        schema: {
            timestamp: 'ISO timestamp',
            agent_id: 'agent-{1-10}',
            algorithm: 'round_robin | least_connections | weighted | ip_hash | consistent_hash',
            requests_handled: 'number (0-100)',
            active_connections: 'number (0-50)',
            cpu_percent: 'number (0-100)',
            memory_mb: 'number (100-2000)',
            response_time_p50: 'number (10-500)',
            response_time_p99: 'number (50-2000)',
            error_rate: 'number (0-0.05)',
            health_score: 'number (0-100)',
        },
        constraints: [
            'Load should be relatively balanced across agents',
            'High CPU should correlate with high request count',
            'Error rate should increase with overload',
            'Health score should decrease with high utilization',
        ],
    });
    console.log('Load Balancing Analysis:');
    console.log(`- Time series points: ${metrics.data.length}`);
    console.log(`- Agent metrics: ${agentMetrics.data.length}`);
    // Calculate balance score: mean requests per agent and the variance around it.
    const requestsByAgent = new Map();
    for (const sample of agentMetrics.data) {
        requestsByAgent.set(sample.agent_id, (requestsByAgent.get(sample.agent_id) ?? 0) + sample.requests_handled);
    }
    let totalRequests = 0;
    for (const value of requestsByAgent.values()) {
        totalRequests += value;
    }
    const avgRequests = totalRequests / requestsByAgent.size;
    let squaredError = 0;
    for (const value of requestsByAgent.values()) {
        squaredError += (value - avgRequests) ** 2;
    }
    const variance = squaredError / requestsByAgent.size;
    console.log(`- Average requests per agent: ${avgRequests.toFixed(1)}`);
    console.log(`- Load variance: ${variance.toFixed(2)} (lower is better)`);
    // Flow-Nexus integration for cloud load balancing
    console.log('\nFlow-Nexus Cloud Integration:');
    console.log('npx flow-nexus@latest login');
    console.log('// Use MCP tools: swarm_scale, agent_spawn, sandbox_create');
    return { metrics, agentMetrics };
}
// ============================================================================
// Example 5: Fault Tolerance Scenarios
// ============================================================================
/**
 * Generate fault tolerance and failure recovery scenarios.
 *
 * Produces 24 hours of failure incidents across six fault classes,
 * one recovery record per incident, prints MTTR/recovery statistics
 * and the failure-type distribution, and returns both datasets.
 */
async function faultToleranceScenarios() {
    console.log('\n🛡 Example 5: Fault Tolerance Scenarios\n');
    const generator = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate failure events
    const failures = await generator.generateEvents({
        count: 100,
        eventTypes: [
            'agent_crash',
            'network_partition',
            'timeout',
            'out_of_memory',
            'resource_exhaustion',
            'byzantine_fault',
        ],
        schema: {
            incident_id: 'UUID',
            event_type: 'one of eventTypes',
            affected_agents: ['array of 1-5 agent ids'],
            severity: 'critical | high | medium | low',
            detection_time: 'ISO timestamp',
            recovery_initiated: 'ISO timestamp',
            recovery_completed: 'ISO timestamp or null',
            recovery_strategy: 'restart | failover | rollback | isolation | replication',
            data_lost: 'boolean',
            service_degraded: 'boolean',
            mttr_seconds: 'number (10-600)',
            root_cause: 'short description of root cause',
        },
        distribution: 'uniform',
        timeRange: {
            start: new Date(Date.now() - 86400000), // Last 24 hours
            end: new Date(),
        },
    });
    // Generate recovery actions (one per incident).
    const recoveryActions = await generator.generateStructured({
        count: failures.data.length,
        schema: {
            incident_id: 'UUID (from failures)',
            action_id: 'UUID',
            action_type: 'health_check | restart_agent | promote_backup | restore_state | load_balance',
            executor: 'coordinator | agent-{id} | auto_healer',
            success: 'boolean (90% true)',
            duration_ms: 'number (100-10000)',
            retries: 'number (0-3)',
            compensating_actions: ['array of 0-2 action types or empty'],
            timestamp: 'ISO timestamp',
        },
    });
    // Single-pass tallies over incidents and recovery outcomes.
    let mttrTotal = 0;
    let dataLossCount = 0;
    let degradedCount = 0;
    const failureTypes = new Map();
    for (const incident of failures.data) {
        mttrTotal += incident.mttr_seconds;
        if (incident.data_lost) {
            dataLossCount += 1;
        }
        if (incident.service_degraded) {
            degradedCount += 1;
        }
        failureTypes.set(incident.event_type, (failureTypes.get(incident.event_type) ?? 0) + 1);
    }
    let successfulRecoveries = 0;
    for (const action of recoveryActions.data) {
        if (action.success) {
            successfulRecoveries += 1;
        }
    }
    const mttrAvg = mttrTotal / failures.data.length;
    const recoveryRate = successfulRecoveries / recoveryActions.data.length;
    console.log('Fault Tolerance Analysis:');
    console.log(`- Total incidents: ${failures.data.length}`);
    console.log(`- Average MTTR: ${mttrAvg.toFixed(1)} seconds`);
    console.log(`- Recovery success rate: ${(recoveryRate * 100).toFixed(1)}%`);
    console.log(`- Data loss incidents: ${dataLossCount}`);
    console.log(`- Service degraded: ${degradedCount}`);
    // Failure type distribution
    console.log('\nFailure Type Distribution:');
    for (const [type, count] of failureTypes) {
        console.log(`- ${type}: ${count} (${((count / failures.data.length) * 100).toFixed(1)}%)`);
    }
    // Self-healing integration
    console.log('\nSelf-Healing Integration:');
    console.log('// Enable auto-recovery hooks');
    console.log('npx claude-flow@alpha hooks enable --type "error-recovery"');
    return { failures, recoveryActions };
}
// ============================================================================
// Example 6: Hierarchical Coordination
// ============================================================================
/**
 * Generate hierarchical swarm coordination data.
 *
 * Produces a single topology document (one master coordinator, a layer of
 * regional sub-coordinators, and the worker pool each manages) plus 200
 * coordination events exchanged across the hierarchy, then prints a summary.
 *
 * NOTE: schema values are free-text prompts interpreted by the generator,
 * not validated types — keep the strings exactly as written.
 * Returns { topology, events } as GenerationResult objects.
 */
async function hierarchicalCoordination() {
    console.log('\n🏗 Example 6: Hierarchical Swarm Coordination\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        // Placeholder key keeps the demo runnable without real credentials.
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate hierarchical structure
    const swarmTopology = await synth.generateStructured({
        count: 1, // a single topology document
        schema: {
            topology_id: 'UUID',
            type: 'hierarchical',
            coordinator: {
                agent_id: 'coordinator-main',
                role: 'master_coordinator',
                responsibilities: ['task_distribution', 'health_monitoring', 'consensus_leader'],
            },
            sub_coordinators: [
                {
                    agent_id: 'sub-coordinator-{1-5}',
                    role: 'regional_coordinator',
                    manages_agents: ['array of 5-10 worker agent ids'],
                    region: 'zone-{A-E}',
                },
            ],
            workers: [
                {
                    agent_id: 'worker-{1-50}',
                    coordinator_id: 'sub-coordinator id',
                    capabilities: ['array of 2-4 capabilities'],
                    status: 'active | idle | busy | offline',
                },
            ],
            communication_patterns: {
                coordinator_to_sub: 'direct',
                sub_to_workers: 'multicast',
                worker_to_coordinator: 'via_sub_coordinator',
                peer_to_peer: 'disabled',
            },
        },
    });
    // Generate coordination events
    const coordinationEvents = await synth.generateEvents({
        count: 200,
        eventTypes: [
            'task_delegation',
            'status_report',
            'resource_request',
            'coordination_sync',
            'topology_update',
        ],
        schema: {
            event_id: 'UUID',
            event_type: 'one of eventTypes',
            from_agent: 'agent id from topology',
            to_agent: 'agent id from topology',
            hierarchy_level: 'coordinator | sub_coordinator | worker',
            payload: 'JSON object',
            timestamp: 'ISO timestamp',
        },
    });
    console.log('Hierarchical Coordination:');
    // The +1 accounts for the single master coordinator.
    console.log(`- Total agents: ${swarmTopology.data[0].workers.length + swarmTopology.data[0].sub_coordinators.length + 1}`);
    console.log(`- Sub-coordinators: ${swarmTopology.data[0].sub_coordinators.length}`);
    console.log(`- Workers: ${swarmTopology.data[0].workers.length}`);
    console.log(`- Coordination events: ${coordinationEvents.data.length}`);
    // Claude-Flow hierarchical setup
    console.log('\nClaude-Flow Hierarchical Setup:');
    console.log('npx claude-flow@alpha mcp start');
    console.log('// MCP: swarm_init with topology: "hierarchical"');
    console.log('// MCP: agent_spawn with role assignments');
    return { topology: swarmTopology, events: coordinationEvents };
}
// ============================================================================
// Run All Examples
// ============================================================================
/**
 * Run every coordination example in sequence, printing a divider between
 * them. Logs and rethrows the first failure so the CLI entrypoint below
 * observes a rejected promise.
 */
async function runAllCoordinationExamples() {
    console.log('🚀 Running All Agent Coordination Examples\n');
    const divider = '='.repeat(70);
    console.log(divider);
    try {
        await agentCommunicationPatterns();
        console.log(divider);
        await taskDistributionScenarios();
        console.log(divider);
        await consensusBuildingData();
        console.log(divider);
        await loadBalancingPatterns();
        console.log(divider);
        await faultToleranceScenarios();
        console.log(divider);
        await hierarchicalCoordination();
        console.log(divider);
        console.log('\n✅ All agent coordination examples completed!\n');
    }
    catch (error) {
        // Thrown values are not guaranteed to be Error instances; narrow
        // before reading `.message` to avoid a secondary TypeError here.
        const message = error instanceof Error ? error.message : String(error);
        console.error('❌ Error running examples:', message);
        throw error;
    }
}
// Run if executed directly
// Run if executed directly.
// This file is compiled CommonJS (note the `(0, index_js_1.createSynth)`
// interop pattern above); `import.meta` is a syntax error in a CJS module,
// so use the idiomatic `require.main === module` check instead.
if (require.main === module) {
    runAllCoordinationExamples().catch(console.error);
}
//# sourceMappingURL=agent-coordination.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,518 @@
/**
* Multi-Agent System Coordination Examples
*
* Demonstrates agent communication patterns, task distribution,
* consensus building, load balancing, and fault tolerance scenarios
* for distributed agent systems.
*
* Integrates with:
* - claude-flow: Swarm initialization and orchestration
* - ruv-swarm: Enhanced coordination patterns
* - flow-nexus: Cloud-based agent management
*/
import { pathToFileURL } from 'node:url';

import { AgenticSynth, createSynth } from '../../dist/index.js';
import type { GenerationResult } from '../../src/types.js';
// ============================================================================
// Example 1: Agent Communication Patterns
// ============================================================================
/**
 * Generate communication patterns for multi-agent systems.
 *
 * Synthesizes ~500 message events (direct, broadcast, multicast,
 * request/reply, pub-sub) over the last hour, then prints summary stats.
 * Schema values are free-text prompts for the generator, not typed fields.
 *
 * @returns the GenerationResult holding the synthesized message events
 */
export async function agentCommunicationPatterns() {
  console.log('\n🤖 Example 1: Agent Communication Patterns\n');
  const synth = createSynth({
    provider: 'gemini',
    // Placeholder key keeps the demo runnable without real credentials.
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    cacheStrategy: 'memory',
  });
  // Generate message passing data
  const messages = await synth.generateEvents({
    count: 500,
    eventTypes: [
      'direct_message',
      'broadcast',
      'multicast',
      'request_reply',
      'publish_subscribe',
    ],
    schema: {
      message_id: 'UUID',
      sender_agent_id: 'agent-{1-20}',
      receiver_agent_id: 'agent-{1-20} or "broadcast"',
      message_type: 'one of eventTypes',
      payload: {
        action: 'task_request | status_update | data_sync | error_report | ack',
        data: 'JSON object with task details',
        priority: 'high | medium | low',
        requires_response: 'boolean',
      },
      timestamp: 'ISO timestamp',
      latency_ms: 'number (1-500)',
      success: 'boolean (95% true)',
    },
    // Poisson arrivals approximate bursty real-world message traffic.
    distribution: 'poisson',
    timeRange: {
      start: new Date(Date.now() - 3600000), // Last hour
      end: new Date(),
    },
  });
  console.log('Communication Patterns Generated:');
  console.log(`- Total messages: ${messages.data.length}`);
  console.log(`- Direct messages: ${messages.data.filter((m: any) => m.message_type === 'direct_message').length}`);
  console.log(`- Broadcasts: ${messages.data.filter((m: any) => m.message_type === 'broadcast').length}`);
  console.log(`- Average latency: ${(messages.data.reduce((sum: number, m: any) => sum + m.latency_ms, 0) / messages.data.length).toFixed(2)}ms`);
  // Integration with claude-flow hooks
  console.log('\nClaude-Flow Integration:');
  console.log('npx claude-flow@alpha hooks notify --message "Communication patterns generated"');
  console.log('npx claude-flow@alpha hooks post-edit --file "messages.json" --memory-key "swarm/coordinator/messages"');
  return messages;
}
// ============================================================================
// Example 2: Task Distribution Scenarios
// ============================================================================
/**
 * Generate task distribution data for load balancing.
 *
 * Synthesizes 300 task records spread across 15 agents, then reports how
 * evenly the work landed (per-agent counts, failed/completed totals).
 *
 * @returns the GenerationResult holding the synthesized task records
 */
export async function taskDistributionScenarios() {
  console.log('\n📋 Example 2: Task Distribution Scenarios\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Synthesize the task records (schema strings are generator prompts).
  const tasks = await synth.generateStructured({
    count: 300,
    schema: {
      task_id: 'UUID',
      task_type: 'compute | data_processing | io_operation | ml_inference | api_call',
      assigned_agent: 'agent-{1-15}',
      estimated_duration_ms: 'number (100-10000)',
      actual_duration_ms: 'number (estimated_duration_ms * 0.8-1.2)',
      cpu_usage: 'number (0-100)',
      memory_mb: 'number (10-1000)',
      priority: 'number (1-10)',
      dependencies: ['array of 0-3 task_ids or empty'],
      status: 'pending | running | completed | failed',
      start_time: 'ISO timestamp',
      end_time: 'ISO timestamp or null',
      retry_count: 'number (0-3)',
    },
    constraints: [
      'Tasks should be distributed evenly across agents',
      'High priority tasks (8-10) should complete faster',
      '5% of tasks should have failed status',
      'Dependencies should form valid DAG (no cycles)',
    ],
  });
  // Tally how many tasks landed on each agent.
  const tasksPerAgent = new Map<string, number>();
  for (const task of tasks.data as any[]) {
    const soFar = tasksPerAgent.get(task.assigned_agent) ?? 0;
    tasksPerAgent.set(task.assigned_agent, soFar + 1);
  }
  // Small helper so the status tallies below read declaratively.
  const countByStatus = (status: string): number =>
    (tasks.data as any[]).filter((t) => t.status === status).length;
  console.log('Task Distribution Analysis:');
  console.log(`- Total tasks: ${tasks.data.length}`);
  console.log(`- Agents utilized: ${tasksPerAgent.size}`);
  console.log(`- Tasks per agent (avg): ${(tasks.data.length / tasksPerAgent.size).toFixed(1)}`);
  console.log(`- Failed tasks: ${countByStatus('failed')}`);
  console.log(`- Completed tasks: ${countByStatus('completed')}`);
  // Ruv-Swarm coordination pattern
  console.log('\nRuv-Swarm Coordination:');
  console.log('npx ruv-swarm mcp start');
  console.log('// Use MCP tools: swarm_init, task_orchestrate, agent_metrics');
  return tasks;
}
// ============================================================================
// Example 3: Consensus Building Data
// ============================================================================
/**
 * Generate consensus protocol data for distributed decision making.
 *
 * Synthesizes 50 consensus rounds spanning raft/paxos/byzantine/quorum
 * protocols, then prints acceptance rate, average round duration, and the
 * per-protocol distribution.
 *
 * @returns the GenerationResult holding the synthesized consensus rounds
 */
export async function consensusBuildingData() {
  console.log('\n🤝 Example 3: Consensus Building Scenarios\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate consensus rounds
  const consensusRounds = await synth.generateStructured({
    count: 50,
    schema: {
      round_id: 'UUID',
      proposal_id: 'UUID',
      protocol: 'raft | paxos | byzantine | quorum',
      participants: ['array of 5-15 agent ids'],
      proposer: 'agent id from participants',
      proposal: {
        type: 'leader_election | config_change | state_update | task_assignment',
        value: 'JSON object with proposal details',
      },
      votes: [
        {
          agent_id: 'agent id from participants',
          vote: 'accept | reject | abstain',
          timestamp: 'ISO timestamp',
          reasoning: 'short explanation',
        },
      ],
      status: 'proposing | voting | committed | rejected | timeout',
      quorum_required: 'number (majority of participants)',
      quorum_reached: 'boolean',
      decision: 'accepted | rejected | timeout',
      round_duration_ms: 'number (100-5000)',
      timestamp: 'ISO timestamp',
    },
    constraints: [
      'Votes array should have one entry per participant',
      '70% of rounds should reach quorum',
      '80% of committed rounds should be accepted',
      'Byzantine protocol should have 3f+1 participants (f failures)',
    ],
  });
  // Analyze consensus
  // Success rate is measured over ALL rounds, not just committed ones.
  const successRate = consensusRounds.data.filter(
    (r: any) => r.decision === 'accepted'
  ).length / consensusRounds.data.length;
  console.log('Consensus Analysis:');
  console.log(`- Total rounds: ${consensusRounds.data.length}`);
  console.log(`- Success rate: ${(successRate * 100).toFixed(1)}%`);
  console.log(`- Average duration: ${(consensusRounds.data.reduce((sum: number, r: any) => sum + r.round_duration_ms, 0) / consensusRounds.data.length).toFixed(0)}ms`);
  console.log(`- Quorum reached: ${consensusRounds.data.filter((r: any) => r.quorum_reached).length} rounds`);
  // Protocol distribution
  const protocolCount = new Map<string, number>();
  consensusRounds.data.forEach((r: any) => {
    protocolCount.set(r.protocol, (protocolCount.get(r.protocol) || 0) + 1);
  });
  console.log('\nProtocol Usage:');
  protocolCount.forEach((count, protocol) => {
    console.log(`- ${protocol}: ${count} rounds`);
  });
  return consensusRounds;
}
// ============================================================================
// Example 4: Load Balancing Patterns
// ============================================================================
/**
 * Generate load balancing metrics and patterns.
 *
 * Produces two datasets: a cluster-level time series of load metrics and
 * 100 per-agent metric samples across several balancing algorithms. Prints
 * an evenness summary (average requests per agent and load variance).
 *
 * @returns { metrics, agentMetrics } GenerationResult pair
 */
export async function loadBalancingPatterns() {
  console.log('\n⚖ Example 4: Load Balancing Patterns\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate time-series load balancing metrics
  const metrics = await synth.generateTimeSeries({
    count: 200,
    interval: '30s',
    metrics: [
      'agent_count',
      'total_requests',
      'avg_response_time_ms',
      'cpu_utilization',
      'memory_utilization',
      'queue_depth',
    ],
    trend: 'mixed',
    seasonality: true,
  });
  // Generate agent-specific metrics
  const agentMetrics = await synth.generateStructured({
    count: 100,
    schema: {
      timestamp: 'ISO timestamp',
      agent_id: 'agent-{1-10}',
      algorithm: 'round_robin | least_connections | weighted | ip_hash | consistent_hash',
      requests_handled: 'number (0-100)',
      active_connections: 'number (0-50)',
      cpu_percent: 'number (0-100)',
      memory_mb: 'number (100-2000)',
      response_time_p50: 'number (10-500)',
      response_time_p99: 'number (50-2000)',
      error_rate: 'number (0-0.05)',
      health_score: 'number (0-100)',
    },
    constraints: [
      'Load should be relatively balanced across agents',
      'High CPU should correlate with high request count',
      'Error rate should increase with overload',
      'Health score should decrease with high utilization',
    ],
  });
  console.log('Load Balancing Analysis:');
  console.log(`- Time series points: ${metrics.data.length}`);
  console.log(`- Agent metrics: ${agentMetrics.data.length}`);
  // Calculate balance score
  // Sum requests_handled per agent across all samples, then measure spread.
  const requestsByAgent = new Map<string, number>();
  agentMetrics.data.forEach((m: any) => {
    requestsByAgent.set(
      m.agent_id,
      (requestsByAgent.get(m.agent_id) || 0) + m.requests_handled
    );
  });
  const avgRequests = Array.from(requestsByAgent.values()).reduce((a, b) => a + b, 0) / requestsByAgent.size;
  // Population variance of per-agent request totals: 0 means perfectly even.
  const variance = Array.from(requestsByAgent.values()).reduce(
    (sum, val) => sum + Math.pow(val - avgRequests, 2),
    0
  ) / requestsByAgent.size;
  console.log(`- Average requests per agent: ${avgRequests.toFixed(1)}`);
  console.log(`- Load variance: ${variance.toFixed(2)} (lower is better)`);
  // Flow-Nexus integration for cloud load balancing
  console.log('\nFlow-Nexus Cloud Integration:');
  console.log('npx flow-nexus@latest login');
  console.log('// Use MCP tools: swarm_scale, agent_spawn, sandbox_create');
  return { metrics, agentMetrics };
}
// ============================================================================
// Example 5: Fault Tolerance Scenarios
// ============================================================================
/**
 * Generate fault tolerance and failure recovery scenarios.
 *
 * Produces 100 failure incidents over the last 24 hours plus one recovery
 * action per incident, then prints MTTR, recovery success rate, and the
 * failure-type distribution.
 *
 * @returns { failures, recoveryActions } GenerationResult pair
 */
export async function faultToleranceScenarios() {
  console.log('\n🛡 Example 5: Fault Tolerance Scenarios\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate failure events
  const failures = await synth.generateEvents({
    count: 100,
    eventTypes: [
      'agent_crash',
      'network_partition',
      'timeout',
      'out_of_memory',
      'resource_exhaustion',
      'byzantine_fault',
    ],
    schema: {
      incident_id: 'UUID',
      event_type: 'one of eventTypes',
      affected_agents: ['array of 1-5 agent ids'],
      severity: 'critical | high | medium | low',
      detection_time: 'ISO timestamp',
      recovery_initiated: 'ISO timestamp',
      recovery_completed: 'ISO timestamp or null',
      recovery_strategy: 'restart | failover | rollback | isolation | replication',
      data_lost: 'boolean',
      service_degraded: 'boolean',
      mttr_seconds: 'number (10-600)',
      root_cause: 'short description of root cause',
    },
    // Failures are spread uniformly (no clustering) across the window.
    distribution: 'uniform',
    timeRange: {
      start: new Date(Date.now() - 86400000), // Last 24 hours
      end: new Date(),
    },
  });
  // Generate recovery actions
  // One action per incident; count is tied to the failure dataset size.
  const recoveryActions = await synth.generateStructured({
    count: failures.data.length,
    schema: {
      incident_id: 'UUID (from failures)',
      action_id: 'UUID',
      action_type: 'health_check | restart_agent | promote_backup | restore_state | load_balance',
      executor: 'coordinator | agent-{id} | auto_healer',
      success: 'boolean (90% true)',
      duration_ms: 'number (100-10000)',
      retries: 'number (0-3)',
      compensating_actions: ['array of 0-2 action types or empty'],
      timestamp: 'ISO timestamp',
    },
  });
  // Analyze fault tolerance
  const mttrAvg = failures.data.reduce((sum: number, f: any) => sum + f.mttr_seconds, 0) / failures.data.length;
  const recoveryRate = recoveryActions.data.filter((a: any) => a.success).length / recoveryActions.data.length;
  console.log('Fault Tolerance Analysis:');
  console.log(`- Total incidents: ${failures.data.length}`);
  console.log(`- Average MTTR: ${mttrAvg.toFixed(1)} seconds`);
  console.log(`- Recovery success rate: ${(recoveryRate * 100).toFixed(1)}%`);
  console.log(`- Data loss incidents: ${failures.data.filter((f: any) => f.data_lost).length}`);
  console.log(`- Service degraded: ${failures.data.filter((f: any) => f.service_degraded).length}`);
  // Failure type distribution
  const failureTypes = new Map<string, number>();
  failures.data.forEach((f: any) => {
    failureTypes.set(f.event_type, (failureTypes.get(f.event_type) || 0) + 1);
  });
  console.log('\nFailure Type Distribution:');
  failureTypes.forEach((count, type) => {
    console.log(`- ${type}: ${count} (${((count / failures.data.length) * 100).toFixed(1)}%)`);
  });
  // Self-healing integration
  console.log('\nSelf-Healing Integration:');
  console.log('// Enable auto-recovery hooks');
  console.log('npx claude-flow@alpha hooks enable --type "error-recovery"');
  return { failures, recoveryActions };
}
// ============================================================================
// Example 6: Hierarchical Coordination
// ============================================================================
/**
 * Generate hierarchical swarm coordination data.
 *
 * Produces a single topology document (master coordinator, regional
 * sub-coordinators, worker pool) plus 200 coordination events exchanged
 * across the hierarchy, then prints a summary.
 *
 * @returns { topology, events } GenerationResult pair
 */
export async function hierarchicalCoordination() {
  console.log('\n🏗 Example 6: Hierarchical Swarm Coordination\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate hierarchical structure
  const swarmTopology = await synth.generateStructured({
    count: 1, // a single topology document
    schema: {
      topology_id: 'UUID',
      type: 'hierarchical',
      coordinator: {
        agent_id: 'coordinator-main',
        role: 'master_coordinator',
        responsibilities: ['task_distribution', 'health_monitoring', 'consensus_leader'],
      },
      sub_coordinators: [
        {
          agent_id: 'sub-coordinator-{1-5}',
          role: 'regional_coordinator',
          manages_agents: ['array of 5-10 worker agent ids'],
          region: 'zone-{A-E}',
        },
      ],
      workers: [
        {
          agent_id: 'worker-{1-50}',
          coordinator_id: 'sub-coordinator id',
          capabilities: ['array of 2-4 capabilities'],
          status: 'active | idle | busy | offline',
        },
      ],
      communication_patterns: {
        coordinator_to_sub: 'direct',
        sub_to_workers: 'multicast',
        worker_to_coordinator: 'via_sub_coordinator',
        peer_to_peer: 'disabled',
      },
    },
  });
  // Generate coordination events
  const coordinationEvents = await synth.generateEvents({
    count: 200,
    eventTypes: [
      'task_delegation',
      'status_report',
      'resource_request',
      'coordination_sync',
      'topology_update',
    ],
    schema: {
      event_id: 'UUID',
      event_type: 'one of eventTypes',
      from_agent: 'agent id from topology',
      to_agent: 'agent id from topology',
      hierarchy_level: 'coordinator | sub_coordinator | worker',
      payload: 'JSON object',
      timestamp: 'ISO timestamp',
    },
  });
  console.log('Hierarchical Coordination:');
  // The +1 accounts for the single master coordinator.
  console.log(`- Total agents: ${swarmTopology.data[0].workers.length + swarmTopology.data[0].sub_coordinators.length + 1}`);
  console.log(`- Sub-coordinators: ${swarmTopology.data[0].sub_coordinators.length}`);
  console.log(`- Workers: ${swarmTopology.data[0].workers.length}`);
  console.log(`- Coordination events: ${coordinationEvents.data.length}`);
  // Claude-Flow hierarchical setup
  console.log('\nClaude-Flow Hierarchical Setup:');
  console.log('npx claude-flow@alpha mcp start');
  console.log('// MCP: swarm_init with topology: "hierarchical"');
  console.log('// MCP: agent_spawn with role assignments');
  return { topology: swarmTopology, events: coordinationEvents };
}
// ============================================================================
// Run All Examples
// ============================================================================
/**
 * Run every coordination example in sequence with a divider line between
 * them. Logs and rethrows the first failure so the CLI guard below observes
 * a rejected promise.
 */
export async function runAllCoordinationExamples() {
  console.log('🚀 Running All Agent Coordination Examples\n');
  const divider = '='.repeat(70);
  console.log(divider);
  try {
    await agentCommunicationPatterns();
    console.log(divider);
    await taskDistributionScenarios();
    console.log(divider);
    await consensusBuildingData();
    console.log(divider);
    await loadBalancingPatterns();
    console.log(divider);
    await faultToleranceScenarios();
    console.log(divider);
    await hierarchicalCoordination();
    console.log(divider);
    console.log('\n✅ All agent coordination examples completed!\n');
  } catch (error: unknown) {
    // Thrown values are not guaranteed to be Error instances; narrow with
    // instanceof instead of asserting `any` and dereferencing `.message`.
    const message = error instanceof Error ? error.message : String(error);
    console.error('❌ Error running examples:', message);
    throw error;
  }
}
// Run only when executed directly (`node agent-coordination.js`), not when
// imported. pathToFileURL handles percent-encoding and Windows drive-letter
// paths that a hand-built `file://${path}` template compares incorrectly;
// the argv[1] guard covers REPL/eval contexts where it is undefined.
if (process.argv[1] && import.meta.url === pathToFileURL(process.argv[1]).href) {
  runAllCoordinationExamples().catch(console.error);
}

View File

@@ -0,0 +1,58 @@
/**
 * Agent Lifecycle Management Examples
 *
 * Demonstrates agent lifecycle patterns including spawning/termination,
 * state synchronization, health checks, recovery patterns, and
 * version migration for dynamic agent systems.
 *
 * Integrates with:
 * - claude-flow: Agent lifecycle hooks and state management
 * - ruv-swarm: Dynamic agent spawning and coordination
 * - Kubernetes: Container orchestration patterns
 *
 * NOTE: generated declaration file (tsc --declaration); edit the
 * corresponding agent-lifecycle.ts source rather than this file.
 */
/**
 * Generate agent spawning and termination lifecycle events
 */
export declare function agentSpawningTermination(): Promise<{
    lifecycleEvents: import("../../dist/index.js").GenerationResult<unknown>;
    spawnStrategies: import("../../dist/index.js").GenerationResult<unknown>;
    resourcePool: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate state synchronization data for distributed agents
 */
export declare function stateSynchronization(): Promise<{
    stateSnapshots: import("../../dist/index.js").GenerationResult<unknown>;
    syncEvents: import("../../dist/index.js").GenerationResult<unknown>;
    consistencyChecks: import("../../dist/index.js").GenerationResult<unknown>;
    syncTopology: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate health check and monitoring data
 */
export declare function healthCheckScenarios(): Promise<{
    healthChecks: import("../../dist/index.js").GenerationResult<unknown>;
    healthConfigs: import("../../dist/index.js").GenerationResult<unknown>;
    healthTimeSeries: import("../../dist/index.js").GenerationResult<unknown>;
    healingActions: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate failure recovery pattern data
 */
export declare function recoveryPatterns(): Promise<{
    failures: import("../../dist/index.js").GenerationResult<unknown>;
    recoveryStrategies: import("../../dist/index.js").GenerationResult<unknown>;
    circuitBreakers: import("../../dist/index.js").GenerationResult<unknown>;
    backupOperations: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate agent version migration data
 */
export declare function versionMigration(): Promise<{
    versions: import("../../dist/index.js").GenerationResult<unknown>;
    migrations: import("../../dist/index.js").GenerationResult<unknown>;
    canaryDeployments: import("../../dist/index.js").GenerationResult<unknown>;
    rollbacks: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Run all lifecycle examples in sequence.
 */
export declare function runAllLifecycleExamples(): Promise<void>;
//# sourceMappingURL=agent-lifecycle.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"agent-lifecycle.d.ts","sourceRoot":"","sources":["agent-lifecycle.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AASH;;GAEG;AACH,wBAAsB,wBAAwB;;;;GAgI7C;AAMD;;GAEG;AACH,wBAAsB,oBAAoB;;;;;GAyIzC;AAMD;;GAEG;AACH,wBAAsB,oBAAoB;;;;;GAyIzC;AAMD;;GAEG;AACH,wBAAsB,gBAAgB;;;;;GAwHrC;AAMD;;GAEG;AACH,wBAAsB,gBAAgB;;;;;GAiJrC;AAMD,wBAAsB,uBAAuB,kBAyB5C"}

View File

@@ -0,0 +1,667 @@
"use strict";
/**
* Agent Lifecycle Management Examples
*
* Demonstrates agent lifecycle patterns including spawning/termination,
* state synchronization, health checks, recovery patterns, and
* version migration for dynamic agent systems.
*
* Integrates with:
* - claude-flow: Agent lifecycle hooks and state management
* - ruv-swarm: Dynamic agent spawning and coordination
* - Kubernetes: Container orchestration patterns
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.agentSpawningTermination = agentSpawningTermination;
exports.stateSynchronization = stateSynchronization;
exports.healthCheckScenarios = healthCheckScenarios;
exports.recoveryPatterns = recoveryPatterns;
exports.versionMigration = versionMigration;
exports.runAllLifecycleExamples = runAllLifecycleExamples;
const index_js_1 = require("../../dist/index.js");
// ============================================================================
// Example 1: Agent Spawning and Termination
// ============================================================================
/**
 * Generate agent spawning and termination lifecycle events.
 *
 * Produces three related datasets — raw lifecycle events over the last 24h,
 * the spawn strategies that drive auto-scaling, and a time series of the
 * shared resource pool — then prints summary statistics.
 *
 * Returns { lifecycleEvents, spawnStrategies, resourcePool }.
 */
async function agentSpawningTermination() {
    console.log('\n🚀 Example 1: Agent Spawning and Termination\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        // Placeholder key keeps the demo runnable without real credentials.
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
        cacheStrategy: 'memory',
    });
    // Generate agent lifecycle events
    const lifecycleEvents = await synth.generateEvents({
        count: 500,
        eventTypes: [
            'agent_spawn_requested',
            'agent_initializing',
            'agent_ready',
            'agent_active',
            'agent_idle',
            'agent_terminating',
            'agent_terminated',
            'agent_failed',
        ],
        schema: {
            event_id: 'UUID',
            agent_id: 'agent-{1-100}',
            event_type: 'one of eventTypes',
            reason: 'spawn reason or termination reason',
            requested_by: 'coordinator | auto_scaler | user | system',
            resource_allocation: {
                cpu_cores: 'number (0.5-8.0)',
                memory_mb: 'number (512-8192)',
                disk_mb: 'number (1024-10240)',
            },
            initialization_config: {
                agent_type: 'worker | coordinator | specialist | observer',
                capabilities: ['array of 1-5 capabilities'],
                priority: 'high | medium | low',
                max_lifetime_minutes: 'number (60-14400) or null',
            },
            state_data_size_mb: 'number (0-1000)',
            startup_time_ms: 'number (100-10000)',
            shutdown_time_ms: 'number (50-5000)',
            exit_code: 'number (0-255) or null',
            timestamp: 'ISO timestamp',
        },
        distribution: 'poisson',
        timeRange: {
            start: new Date(Date.now() - 86400000), // Last 24 hours
            end: new Date(),
        },
    });
    // Generate agent spawn strategies
    const spawnStrategies = await synth.generateStructured({
        count: 20,
        schema: {
            strategy_id: 'UUID',
            strategy_name: 'descriptive name',
            trigger_type: 'manual | scheduled | load_based | event_driven | predictive',
            conditions: ['array of 2-4 conditions'],
            agent_template: {
                agent_type: 'worker | coordinator | specialist | observer',
                base_config: 'JSON configuration object',
                scaling_limits: {
                    min_instances: 'number (1-5)',
                    max_instances: 'number (10-100)',
                    scale_up_threshold: 'number (0-100)',
                    scale_down_threshold: 'number (0-100)',
                },
            },
            spawn_pattern: 'immediate | gradual | burst',
            cooldown_seconds: 'number (30-600)',
            success_rate: 'number (0-100)',
            avg_spawn_time_ms: 'number (500-15000)',
        },
    });
    // Generate resource pool state
    const resourcePool = await synth.generateTimeSeries({
        count: 100,
        interval: '5m',
        metrics: [
            'active_agents',
            'spawning_agents',
            'terminating_agents',
            'failed_spawns',
            'total_cpu_usage',
            'total_memory_mb',
            'available_slots',
        ],
        trend: 'mixed',
    });
    console.log('Agent Lifecycle Analysis:');
    console.log(`- Lifecycle events: ${lifecycleEvents.data.length}`);
    console.log(`- Spawn strategies: ${spawnStrategies.data.length}`);
    console.log(`- Resource pool snapshots: ${resourcePool.data.length}`);
    // Analyze lifecycle events
    const spawnedCount = lifecycleEvents.data.filter((e) => e.event_type === 'agent_ready').length;
    const terminatedCount = lifecycleEvents.data.filter((e) => e.event_type === 'agent_terminated').length;
    const failedCount = lifecycleEvents.data.filter((e) => e.event_type === 'agent_failed').length;
    console.log(`\nLifecycle Statistics:`);
    console.log(`- Successfully spawned: ${spawnedCount}`);
    console.log(`- Terminated: ${terminatedCount}`);
    console.log(`- Failed: ${failedCount} (${((failedCount / lifecycleEvents.data.length) * 100).toFixed(1)}%)`);
    // Average spawn time over the events that actually report one.
    // BUG FIX: the previous version summed startup_time_ms across ALL event
    // types but divided by the count of 'agent_ready' events only, skewing
    // the average (and yielding NaN/Infinity when spawnedCount was 0).
    const startupSamples = lifecycleEvents.data.filter((e) => e.startup_time_ms);
    const avgSpawnTime = startupSamples.length > 0
        ? startupSamples.reduce((sum, e) => sum + e.startup_time_ms, 0) / startupSamples.length
        : 0;
    console.log(`- Average spawn time: ${avgSpawnTime.toFixed(0)}ms`);
    // Claude-Flow integration
    console.log('\nClaude-Flow Integration:');
    console.log('npx claude-flow@alpha hooks pre-task --spawn-agents 5');
    console.log('// MCP: agent_spawn with configuration');
    console.log('npx claude-flow@alpha hooks post-task --cleanup-agents true');
    return { lifecycleEvents, spawnStrategies, resourcePool };
}
// ============================================================================
// Example 2: State Synchronization
// ============================================================================
/**
 * Generate state synchronization data for distributed agents.
 *
 * Produces four datasets: per-agent state snapshots, sync events (save/
 * load/replicate/conflict/merge/rollback), consistency-check results, and
 * a single replication topology document; then prints success-rate and
 * consistency summaries.
 *
 * Returns { stateSnapshots, syncEvents, consistencyChecks, syncTopology }.
 */
async function stateSynchronization() {
    console.log('\n🔄 Example 2: State Synchronization\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        // Placeholder key keeps the demo runnable without real credentials.
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate agent state snapshots
    const stateSnapshots = await synth.generateStructured({
        count: 500,
        schema: {
            snapshot_id: 'UUID',
            agent_id: 'agent-{1-50}',
            version: 'number (1-1000)',
            state_type: 'full | incremental | checkpoint',
            state_data: {
                memory: {
                    short_term: 'JSON object',
                    long_term: 'JSON object',
                    working_set_size_mb: 'number (1-500)',
                },
                task_queue: ['array of 0-20 task ids'],
                active_connections: ['array of 0-10 agent ids'],
                performance_metrics: {
                    tasks_completed: 'number (0-1000)',
                    cpu_usage: 'number (0-100)',
                    memory_usage: 'number (0-100)',
                },
                custom_state: 'JSON object',
            },
            state_size_bytes: 'number (1024-10485760)',
            compression_ratio: 'number (0.3-1.0)',
            checksum: 'SHA-256 hash',
            created_at: 'ISO timestamp',
        },
    });
    // Generate synchronization events
    const syncEvents = await synth.generateEvents({
        count: 1000,
        eventTypes: [
            'state_saved',
            'state_loaded',
            'state_replicated',
            'state_conflict',
            'state_merged',
            'state_rollback',
        ],
        schema: {
            event_id: 'UUID',
            event_type: 'one of eventTypes',
            agent_id: 'agent-{1-50}',
            snapshot_id: 'UUID (from stateSnapshots)',
            target_location: 'local | remote | backup | replica-{1-5}',
            sync_strategy: 'optimistic | pessimistic | eventual | strong',
            duration_ms: 'number (10-5000)',
            data_transferred_mb: 'number (0.1-100)',
            success: 'boolean (95% true)',
            conflict_resolved: 'boolean or null',
            timestamp: 'ISO timestamp',
        },
        distribution: 'poisson',
    });
    // Generate state consistency checks
    const consistencyChecks = await synth.generateStructured({
        count: 100,
        schema: {
            check_id: 'UUID',
            check_type: 'integrity | consistency | replication | divergence',
            agents_checked: ['array of 3-10 agent ids'],
            snapshot_versions: ['array of version numbers'],
            consistency_score: 'number (0-100)',
            divergent_agents: ['array of 0-3 agent ids or empty'],
            anomalies_detected: ['array of 0-2 anomaly descriptions or empty'],
            corrective_action: 'none | resync | repair | rollback | null',
            duration_ms: 'number (100-10000)',
            timestamp: 'ISO timestamp',
        },
    });
    // Generate state synchronization topology
    const syncTopology = await synth.generateStructured({
        count: 1, // a single topology document
        schema: {
            topology_id: 'UUID',
            sync_pattern: 'star | mesh | ring | hierarchical | hybrid',
            nodes: [
                {
                    node_id: 'node-{1-10}',
                    role: 'primary | replica | backup | cache',
                    agents_hosted: ['array of 5-10 agent ids'],
                    sync_frequency_seconds: 'number (1-300)',
                    replication_lag_ms: 'number (0-1000)',
                    storage_capacity_gb: 'number (10-1000)',
                    storage_used_gb: 'number (proportional to capacity)',
                },
            ],
            sync_protocol: 'raft | paxos | gossip | two_phase_commit',
            conflict_resolution: 'last_write_wins | merge | vector_clock | manual',
        },
    });
    console.log('State Synchronization Analysis:');
    console.log(`- State snapshots: ${stateSnapshots.data.length}`);
    console.log(`- Sync events: ${syncEvents.data.length}`);
    console.log(`- Consistency checks: ${consistencyChecks.data.length}`);
    // Analyze synchronization success
    const successfulSyncs = syncEvents.data.filter((e) => e.success).length;
    const avgSyncTime = syncEvents.data.reduce((sum, e) => sum + e.duration_ms, 0) / syncEvents.data.length;
    console.log(`\nSynchronization Metrics:`);
    console.log(`- Success rate: ${((successfulSyncs / syncEvents.data.length) * 100).toFixed(1)}%`);
    console.log(`- Average sync time: ${avgSyncTime.toFixed(0)}ms`);
    // Consistency analysis
    const avgConsistency = consistencyChecks.data.reduce((sum, c) => sum + c.consistency_score, 0) / consistencyChecks.data.length;
    console.log(`- Average consistency score: ${avgConsistency.toFixed(1)}/100`);
    console.log(`- Anomalies detected: ${consistencyChecks.data.filter((c) => c.anomalies_detected.length > 0).length}`);
    // Integration pattern
    console.log('\nState Sync Integration:');
    console.log('// Store state in distributed storage (Redis, etcd)');
    console.log('await redis.set(`agent:${agentId}:state`, JSON.stringify(state));');
    console.log('// Use claude-flow memory for coordination state');
    console.log('npx claude-flow@alpha hooks session-end --export-state true');
    return { stateSnapshots, syncEvents, consistencyChecks, syncTopology };
}
// ============================================================================
// Example 3: Health Check Scenarios
// ============================================================================
/**
* Generate health check and monitoring data
*/
async function healthCheckScenarios() {
  console.log('\n💊 Example 3: Health Check Scenarios\n');
  const synth = (0, index_js_1.createSynth)({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate individual health-check results (liveness/readiness/startup/deep).
  const healthChecks = await synth.generateEvents({
    count: 1000,
    eventTypes: [
      'health_check_passed',
      'health_check_degraded',
      'health_check_failed',
      'health_check_timeout',
    ],
    schema: {
      check_id: 'UUID',
      agent_id: 'agent-{1-80}',
      event_type: 'one of eventTypes',
      check_type: 'liveness | readiness | startup | deep',
      health_score: 'number (0-100)',
      metrics: {
        response_time_ms: 'number (1-5000)',
        cpu_usage: 'number (0-100)',
        memory_usage: 'number (0-100)',
        active_connections: 'number (0-100)',
        error_rate: 'number (0-100)',
        queue_depth: 'number (0-1000)',
      },
      issues_detected: ['array of 0-3 issue descriptions or empty'],
      recovery_actions: ['array of 0-2 actions or empty'],
      timestamp: 'ISO timestamp',
    },
    distribution: 'poisson',
  });
  // Generate per-agent health monitoring configuration (probe settings).
  const healthConfigs = await synth.generateStructured({
    count: 50,
    schema: {
      agent_id: 'agent-{1-50}',
      liveness_probe: {
        enabled: 'boolean',
        interval_seconds: 'number (5-60)',
        timeout_seconds: 'number (1-10)',
        failure_threshold: 'number (3-10)',
        success_threshold: 'number (1-3)',
      },
      readiness_probe: {
        enabled: 'boolean',
        interval_seconds: 'number (5-30)',
        timeout_seconds: 'number (1-10)',
        failure_threshold: 'number (3-10)',
      },
      health_thresholds: {
        cpu_warning: 'number (70-85)',
        cpu_critical: 'number (85-95)',
        memory_warning: 'number (70-85)',
        memory_critical: 'number (85-95)',
        error_rate_threshold: 'number (5-20)',
      },
      auto_recovery_enabled: 'boolean',
      health_status: 'healthy | degraded | unhealthy | unknown',
    },
  });
  // Generate fleet-level health time series at 1-minute resolution.
  const healthTimeSeries = await synth.generateTimeSeries({
    count: 200,
    interval: '1m',
    metrics: [
      'healthy_agents',
      'degraded_agents',
      'unhealthy_agents',
      'avg_health_score',
      'failed_checks',
      'recovery_actions',
    ],
    trend: 'stable',
  });
  // Generate auto-healing actions triggered by failing checks.
  const healingActions = await synth.generateStructured({
    count: 50,
    schema: {
      action_id: 'UUID',
      agent_id: 'agent-{1-80}',
      trigger_check_id: 'UUID (from healthChecks)',
      action_type: 'restart | reset_state | scale_resources | failover | isolate',
      severity: 'low | medium | high | critical',
      executed_at: 'ISO timestamp',
      duration_ms: 'number (100-30000)',
      success: 'boolean (85% true)',
      health_before: 'number (0-50)',
      health_after: 'number (51-100) or same as health_before',
      side_effects: ['array of 0-2 side effects or empty'],
    },
  });
  console.log('Health Check Analysis:');
  console.log(`- Health checks: ${healthChecks.data.length}`);
  console.log(`- Agent configs: ${healthConfigs.data.length}`);
  console.log(`- Time series points: ${healthTimeSeries.data.length}`);
  console.log(`- Healing actions: ${healingActions.data.length}`);
  // Safe percentage: avoids printing "NaN%" when a generated dataset is empty.
  const pct = (part, total) => (total > 0 ? (part / total) * 100 : 0).toFixed(1);
  // Analyze health check results
  const passedChecks = healthChecks.data.filter((c) => c.event_type === 'health_check_passed').length;
  const failedChecks = healthChecks.data.filter((c) => c.event_type === 'health_check_failed').length;
  console.log(`\nHealth Statistics:`);
  console.log(`- Passed: ${passedChecks} (${pct(passedChecks, healthChecks.data.length)}%)`);
  console.log(`- Failed: ${failedChecks} (${pct(failedChecks, healthChecks.data.length)}%)`);
  // Auto-healing effectiveness
  const successfulActions = healingActions.data.filter((a) => a.success);
  const successfulHealing = successfulActions.length;
  // BUG FIX: the original divided by successfulHealing unconditionally,
  // printing "+NaN" whenever no healing action succeeded.
  const avgHealthImprovement = successfulHealing > 0
    ? successfulActions.reduce((sum, a) => sum + (a.health_after - a.health_before), 0) / successfulHealing
    : 0;
  console.log(`\nAuto-Healing:`);
  console.log(`- Actions taken: ${healingActions.data.length}`);
  console.log(`- Success rate: ${pct(successfulHealing, healingActions.data.length)}%`);
  console.log(`- Average health improvement: +${avgHealthImprovement.toFixed(1)}`);
  // Kubernetes health checks
  console.log('\nKubernetes Integration:');
  console.log('// Define health probes in pod spec');
  console.log('livenessProbe: { httpGet: { path: "/health", port: 8080 } }');
  console.log('readinessProbe: { httpGet: { path: "/ready", port: 8080 } }');
  return { healthChecks, healthConfigs, healthTimeSeries, healingActions };
}
// ============================================================================
// Example 4: Recovery Patterns
// ============================================================================
/**
* Generate failure recovery pattern data
*/
async function recoveryPatterns() {
  console.log('\n🔧 Example 4: Recovery Patterns\n');
  const synth = (0, index_js_1.createSynth)({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate failure scenarios
  const failures = await synth.generateStructured({
    count: 100,
    schema: {
      failure_id: 'UUID',
      agent_id: 'agent-{1-60}',
      failure_type: 'crash | hang | memory_leak | resource_exhaustion | network_partition | data_corruption',
      severity: 'critical | high | medium | low',
      impact_scope: 'single_agent | cluster | region | global',
      affected_tasks: ['array of 0-20 task ids'],
      data_at_risk: 'boolean',
      detected_at: 'ISO timestamp',
      detection_method: 'health_check | monitoring | user_report | self_reported',
      mttr_seconds: 'number (10-3600)',
      mttd_seconds: 'number (1-600)',
    },
  });
  // Generate one recovery strategy per recorded failure.
  const recoveryStrategies = await synth.generateStructured({
    count: failures.data.length,
    schema: {
      strategy_id: 'UUID',
      failure_id: 'UUID (from failures)',
      strategy_type: 'restart | failover | rollback | rebuild | manual_intervention',
      phases: [
        {
          phase_name: 'detection | isolation | recovery | verification | cleanup',
          actions: ['array of 2-5 actions'],
          duration_ms: 'number (100-60000)',
          success: 'boolean',
        },
      ],
      recovery_time_objective_seconds: 'number (60-3600)',
      recovery_point_objective_seconds: 'number (0-1800)',
      actual_recovery_time_seconds: 'number',
      data_loss_amount: 'number (0-1000 MB) or 0',
      automatic: 'boolean (80% true)',
      success: 'boolean (90% true)',
      lessons_learned: ['array of 2-4 lessons'],
    },
  });
  // Generate circuit breaker states
  const circuitBreakers = await synth.generateTimeSeries({
    count: 150,
    interval: '2m',
    metrics: [
      'closed_circuits',
      'open_circuits',
      'half_open_circuits',
      'total_requests',
      'failed_requests',
      'trips_per_interval',
    ],
    trend: 'mixed',
  });
  // Generate backup and restore operations
  const backupOperations = await synth.generateStructured({
    count: 200,
    schema: {
      operation_id: 'UUID',
      operation_type: 'backup | restore | verify | cleanup',
      agent_id: 'agent-{1-60}',
      backup_id: 'backup-UUID',
      data_size_mb: 'number (10-5000)',
      duration_ms: 'number (1000-300000)',
      storage_location: 'local | s3 | gcs | azure_blob',
      compression_enabled: 'boolean',
      encryption_enabled: 'boolean',
      success: 'boolean (95% true)',
      timestamp: 'ISO timestamp',
    },
  });
  console.log('Recovery Pattern Analysis:');
  console.log(`- Failures recorded: ${failures.data.length}`);
  console.log(`- Recovery strategies: ${recoveryStrategies.data.length}`);
  console.log(`- Circuit breaker metrics: ${circuitBreakers.data.length}`);
  console.log(`- Backup operations: ${backupOperations.data.length}`);
  // Guarded aggregate helpers: the original divided by .length unconditionally,
  // so an empty generated dataset printed "NaN" in the report.
  const pct = (part, total) => (total > 0 ? (part / total) * 100 : 0).toFixed(1);
  const avg = (items, pick) =>
    items.length > 0 ? items.reduce((sum, x) => sum + pick(x), 0) / items.length : 0;
  // Analyze recovery effectiveness
  const successfulRecoveries = recoveryStrategies.data.filter((s) => s.success).length;
  const automaticRecoveries = recoveryStrategies.data.filter((s) => s.automatic).length;
  console.log(`\nRecovery Effectiveness:`);
  console.log(`- Success rate: ${pct(successfulRecoveries, recoveryStrategies.data.length)}%`);
  console.log(`- Automatic recoveries: ${pct(automaticRecoveries, recoveryStrategies.data.length)}%`);
  // Calculate average recovery times
  const avgMTTR = avg(failures.data, (f) => f.mttr_seconds);
  const avgMTTD = avg(failures.data, (f) => f.mttd_seconds);
  console.log(`- Average MTTR: ${avgMTTR.toFixed(0)} seconds`);
  console.log(`- Average MTTD: ${avgMTTD.toFixed(0)} seconds`);
  // Data loss analysis
  const dataLossIncidents = recoveryStrategies.data.filter((s) => s.data_loss_amount > 0).length;
  console.log(`\nData Protection:`);
  console.log(`- Incidents with data loss: ${dataLossIncidents} (${pct(dataLossIncidents, recoveryStrategies.data.length)}%)`);
  console.log(`- Successful backups: ${backupOperations.data.filter((b) => b.operation_type === 'backup' && b.success).length}`);
  console.log('\nRecovery Pattern Implementation:');
  console.log('// Implement circuit breaker pattern');
  console.log('// Use claude-flow hooks for automatic recovery');
  console.log('npx claude-flow@alpha hooks enable --type "error-recovery"');
  return { failures, recoveryStrategies, circuitBreakers, backupOperations };
}
// ============================================================================
// Example 5: Version Migration
// ============================================================================
/**
* Generate agent version migration data
*/
async function versionMigration() {
  console.log('\n🔄 Example 5: Version Migration\n');
  const synth = (0, index_js_1.createSynth)({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate version information
  const versions = await synth.generateStructured({
    count: 10,
    schema: {
      version_id: 'UUID',
      version_number: 'semantic version (e.g., 2.5.1)',
      release_date: 'ISO timestamp',
      changes: {
        features: ['array of 2-5 new features'],
        improvements: ['array of 1-4 improvements'],
        bug_fixes: ['array of 2-6 bug fixes'],
        breaking_changes: ['array of 0-2 breaking changes or empty'],
      },
      compatibility: {
        backward_compatible: 'boolean',
        migration_required: 'boolean',
        rollback_supported: 'boolean',
      },
      deployment_strategy: 'blue_green | rolling | canary | recreate',
    },
  });
  // Generate migration operations
  const migrations = await synth.generateStructured({
    count: 50,
    schema: {
      migration_id: 'UUID',
      from_version: 'semantic version',
      to_version: 'semantic version',
      agent_id: 'agent-{1-100}',
      migration_type: 'in_place | side_by_side | recreate',
      migration_steps: [
        {
          step_id: 'UUID',
          step_name: 'descriptive step name',
          step_type: 'backup | stop | migrate_data | update_code | restart | verify',
          duration_ms: 'number (100-60000)',
          success: 'boolean',
          rollback_supported: 'boolean',
        },
      ],
      downtime_ms: 'number (0-120000)',
      data_migrated_mb: 'number (0-1000)',
      overall_status: 'in_progress | completed | failed | rolled_back',
      started_at: 'ISO timestamp',
      completed_at: 'ISO timestamp or null',
    },
    constraints: [
      'Migration should have 4-8 steps',
      '85% of migrations should complete successfully',
      'Failed migrations should support rollback',
    ],
  });
  // Generate canary deployment data
  const canaryDeployments = await synth.generateStructured({
    count: 15,
    schema: {
      deployment_id: 'UUID',
      version: 'semantic version',
      canary_percentage: 'number (5-50)',
      canary_agents: ['array of agent ids'],
      stable_agents: ['array of agent ids'],
      metrics_comparison: {
        canary_error_rate: 'number (0-10)',
        stable_error_rate: 'number (0-10)',
        canary_latency_p99: 'number (10-1000)',
        stable_latency_p99: 'number (10-1000)',
        canary_throughput: 'number (100-10000)',
        stable_throughput: 'number (100-10000)',
      },
      decision: 'promote | hold | rollback',
      decision_reason: 'detailed reason',
      monitoring_duration_minutes: 'number (30-1440)',
    },
  });
  // Generate rollback events
  const rollbacks = await synth.generateStructured({
    count: 10,
    schema: {
      rollback_id: 'UUID',
      migration_id: 'UUID (from migrations)',
      trigger: 'high_error_rate | performance_degradation | manual | data_inconsistency',
      rollback_strategy: 'automated | manual',
      affected_agents: ['array of 5-30 agent ids'],
      rollback_duration_ms: 'number (5000-300000)',
      data_reverted_mb: 'number (0-1000)',
      success: 'boolean (95% true)',
      timestamp: 'ISO timestamp',
    },
  });
  console.log('Version Migration Analysis:');
  console.log(`- Versions tracked: ${versions.data.length}`);
  console.log(`- Migrations executed: ${migrations.data.length}`);
  console.log(`- Canary deployments: ${canaryDeployments.data.length}`);
  console.log(`- Rollbacks: ${rollbacks.data.length}`);
  // Safe percentage: avoids printing "NaN%" for empty datasets.
  const pct = (part, total) => (total > 0 ? (part / total) * 100 : 0).toFixed(1);
  // Analyze migration success. Filter once and reuse (the original filtered
  // the completed set twice).
  const completedMigrations = migrations.data.filter((m) => m.overall_status === 'completed');
  const successfulMigrations = completedMigrations.length;
  const failedMigrations = migrations.data.filter((m) => m.overall_status === 'failed').length;
  console.log(`\nMigration Success Rate:`);
  console.log(`- Completed: ${successfulMigrations} (${pct(successfulMigrations, migrations.data.length)}%)`);
  console.log(`- Failed: ${failedMigrations}`);
  console.log(`- Rolled back: ${migrations.data.filter((m) => m.overall_status === 'rolled_back').length}`);
  // Analyze downtime.
  // BUG FIX: the original divided by successfulMigrations unconditionally,
  // printing "NaN seconds" when no migration completed.
  const avgDowntime = successfulMigrations > 0
    ? completedMigrations.reduce((sum, m) => sum + m.downtime_ms, 0) / successfulMigrations
    : 0;
  const zeroDowntime = migrations.data.filter((m) => m.downtime_ms === 0).length;
  console.log(`\nDowntime Analysis:`);
  console.log(`- Average downtime: ${(avgDowntime / 1000).toFixed(1)} seconds`);
  console.log(`- Zero-downtime migrations: ${zeroDowntime} (${pct(zeroDowntime, migrations.data.length)}%)`);
  // Canary deployment decisions
  const promoted = canaryDeployments.data.filter((c) => c.decision === 'promote').length;
  const rolledBack = canaryDeployments.data.filter((c) => c.decision === 'rollback').length;
  console.log(`\nCanary Deployment Decisions:`);
  console.log(`- Promoted: ${promoted} (${pct(promoted, canaryDeployments.data.length)}%)`);
  console.log(`- Rolled back: ${rolledBack} (${pct(rolledBack, canaryDeployments.data.length)}%)`);
  console.log('\nDeployment Integration:');
  console.log('// Blue-green deployment with Kubernetes');
  console.log('kubectl apply -f deployment-v2.yaml');
  console.log('kubectl set image deployment/agents agent=image:v2');
  console.log('// Monitor and rollback if needed');
  return { versions, migrations, canaryDeployments, rollbacks };
}
// ============================================================================
// Run All Examples
// ============================================================================
/**
 * Runs all agent lifecycle examples sequentially, printing a divider line
 * between them. Logs and rethrows the first error so callers can react.
 */
async function runAllLifecycleExamples() {
  console.log('🚀 Running All Agent Lifecycle Examples\n');
  console.log('='.repeat(70));
  try {
    await agentSpawningTermination();
    console.log('='.repeat(70));
    await stateSynchronization();
    console.log('='.repeat(70));
    await healthCheckScenarios();
    console.log('='.repeat(70));
    await recoveryPatterns();
    console.log('='.repeat(70));
    await versionMigration();
    console.log('='.repeat(70));
    console.log('\n✅ All agent lifecycle examples completed!\n');
  }
  catch (error) {
    // Thrown values are not guaranteed to be Error instances; the original
    // `error.message` printed `undefined` for non-Error throws.
    const message = error instanceof Error ? error.message : String(error);
    console.error('❌ Error running examples:', message);
    throw error;
  }
}
// Run if executed directly (module URL matches the CLI entry script).
const invokedDirectly = import.meta.url === `file://${process.argv[1]}`;
if (invokedDirectly) {
  runAllLifecycleExamples().catch(console.error);
}
//# sourceMappingURL=agent-lifecycle.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,763 @@
/**
* Agent Lifecycle Management Examples
*
* Demonstrates agent lifecycle patterns including spawning/termination,
* state synchronization, health checks, recovery patterns, and
* version migration for dynamic agent systems.
*
* Integrates with:
* - claude-flow: Agent lifecycle hooks and state management
* - ruv-swarm: Dynamic agent spawning and coordination
* - Kubernetes: Container orchestration patterns
*/
import { AgenticSynth, createSynth } from '../../dist/index.js';
import type { GenerationResult } from '../../src/types.js';
// ============================================================================
// Example 1: Agent Spawning and Termination
// ============================================================================
/**
* Generate agent spawning and termination lifecycle events
*/
export async function agentSpawningTermination() {
  console.log('\n🚀 Example 1: Agent Spawning and Termination\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    cacheStrategy: 'memory',
  });
  // Generate agent lifecycle events
  const lifecycleEvents = await synth.generateEvents({
    count: 500,
    eventTypes: [
      'agent_spawn_requested',
      'agent_initializing',
      'agent_ready',
      'agent_active',
      'agent_idle',
      'agent_terminating',
      'agent_terminated',
      'agent_failed',
    ],
    schema: {
      event_id: 'UUID',
      agent_id: 'agent-{1-100}',
      event_type: 'one of eventTypes',
      reason: 'spawn reason or termination reason',
      requested_by: 'coordinator | auto_scaler | user | system',
      resource_allocation: {
        cpu_cores: 'number (0.5-8.0)',
        memory_mb: 'number (512-8192)',
        disk_mb: 'number (1024-10240)',
      },
      initialization_config: {
        agent_type: 'worker | coordinator | specialist | observer',
        capabilities: ['array of 1-5 capabilities'],
        priority: 'high | medium | low',
        max_lifetime_minutes: 'number (60-14400) or null',
      },
      state_data_size_mb: 'number (0-1000)',
      startup_time_ms: 'number (100-10000)',
      shutdown_time_ms: 'number (50-5000)',
      exit_code: 'number (0-255) or null',
      timestamp: 'ISO timestamp',
    },
    distribution: 'poisson',
    timeRange: {
      start: new Date(Date.now() - 86400000), // Last 24 hours
      end: new Date(),
    },
  });
  // Generate agent spawn strategies
  const spawnStrategies = await synth.generateStructured({
    count: 20,
    schema: {
      strategy_id: 'UUID',
      strategy_name: 'descriptive name',
      trigger_type: 'manual | scheduled | load_based | event_driven | predictive',
      conditions: ['array of 2-4 conditions'],
      agent_template: {
        agent_type: 'worker | coordinator | specialist | observer',
        base_config: 'JSON configuration object',
        scaling_limits: {
          min_instances: 'number (1-5)',
          max_instances: 'number (10-100)',
          scale_up_threshold: 'number (0-100)',
          scale_down_threshold: 'number (0-100)',
        },
      },
      spawn_pattern: 'immediate | gradual | burst',
      cooldown_seconds: 'number (30-600)',
      success_rate: 'number (0-100)',
      avg_spawn_time_ms: 'number (500-15000)',
    },
  });
  // Generate resource pool state
  const resourcePool = await synth.generateTimeSeries({
    count: 100,
    interval: '5m',
    metrics: [
      'active_agents',
      'spawning_agents',
      'terminating_agents',
      'failed_spawns',
      'total_cpu_usage',
      'total_memory_mb',
      'available_slots',
    ],
    trend: 'mixed',
  });
  console.log('Agent Lifecycle Analysis:');
  console.log(`- Lifecycle events: ${lifecycleEvents.data.length}`);
  console.log(`- Spawn strategies: ${spawnStrategies.data.length}`);
  console.log(`- Resource pool snapshots: ${resourcePool.data.length}`);
  // Analyze lifecycle events
  const spawnedCount = lifecycleEvents.data.filter(
    (e: any) => e.event_type === 'agent_ready'
  ).length;
  const terminatedCount = lifecycleEvents.data.filter(
    (e: any) => e.event_type === 'agent_terminated'
  ).length;
  const failedCount = lifecycleEvents.data.filter(
    (e: any) => e.event_type === 'agent_failed'
  ).length;
  // Safe percentage: avoids "NaN%" if the generator returns no events.
  const pct = (part: number, total: number): string =>
    (total > 0 ? (part / total) * 100 : 0).toFixed(1);
  console.log(`\nLifecycle Statistics:`);
  console.log(`- Successfully spawned: ${spawnedCount}`);
  console.log(`- Terminated: ${terminatedCount}`);
  console.log(`- Failed: ${failedCount} (${pct(failedCount, lifecycleEvents.data.length)}%)`);
  // Average spawn time over the events that actually report one.
  // BUG FIX: the original summed startup_time_ms across ALL events carrying
  // the field but divided by the count of 'agent_ready' events — a mismatched
  // denominator that skewed the average (and yielded NaN when zero).
  const spawnTimes = lifecycleEvents.data
    .filter((e: any) => typeof e.startup_time_ms === 'number')
    .map((e: any) => e.startup_time_ms as number);
  const avgSpawnTime = spawnTimes.length > 0
    ? spawnTimes.reduce((sum: number, t: number) => sum + t, 0) / spawnTimes.length
    : 0;
  console.log(`- Average spawn time: ${avgSpawnTime.toFixed(0)}ms`);
  // Claude-Flow integration
  console.log('\nClaude-Flow Integration:');
  console.log('npx claude-flow@alpha hooks pre-task --spawn-agents 5');
  console.log('// MCP: agent_spawn with configuration');
  console.log('npx claude-flow@alpha hooks post-task --cleanup-agents true');
  return { lifecycleEvents, spawnStrategies, resourcePool };
}
// ============================================================================
// Example 2: State Synchronization
// ============================================================================
/**
* Generate state synchronization data for distributed agents
*/
export async function stateSynchronization() {
  console.log('\n🔄 Example 2: State Synchronization\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate agent state snapshots
  const stateSnapshots = await synth.generateStructured({
    count: 500,
    schema: {
      snapshot_id: 'UUID',
      agent_id: 'agent-{1-50}',
      version: 'number (1-1000)',
      state_type: 'full | incremental | checkpoint',
      state_data: {
        memory: {
          short_term: 'JSON object',
          long_term: 'JSON object',
          working_set_size_mb: 'number (1-500)',
        },
        task_queue: ['array of 0-20 task ids'],
        active_connections: ['array of 0-10 agent ids'],
        performance_metrics: {
          tasks_completed: 'number (0-1000)',
          cpu_usage: 'number (0-100)',
          memory_usage: 'number (0-100)',
        },
        custom_state: 'JSON object',
      },
      state_size_bytes: 'number (1024-10485760)',
      compression_ratio: 'number (0.3-1.0)',
      checksum: 'SHA-256 hash',
      created_at: 'ISO timestamp',
    },
  });
  // Generate synchronization events
  const syncEvents = await synth.generateEvents({
    count: 1000,
    eventTypes: [
      'state_saved',
      'state_loaded',
      'state_replicated',
      'state_conflict',
      'state_merged',
      'state_rollback',
    ],
    schema: {
      event_id: 'UUID',
      event_type: 'one of eventTypes',
      agent_id: 'agent-{1-50}',
      snapshot_id: 'UUID (from stateSnapshots)',
      target_location: 'local | remote | backup | replica-{1-5}',
      sync_strategy: 'optimistic | pessimistic | eventual | strong',
      duration_ms: 'number (10-5000)',
      data_transferred_mb: 'number (0.1-100)',
      success: 'boolean (95% true)',
      conflict_resolved: 'boolean or null',
      timestamp: 'ISO timestamp',
    },
    distribution: 'poisson',
  });
  // Generate state consistency checks
  const consistencyChecks = await synth.generateStructured({
    count: 100,
    schema: {
      check_id: 'UUID',
      check_type: 'integrity | consistency | replication | divergence',
      agents_checked: ['array of 3-10 agent ids'],
      snapshot_versions: ['array of version numbers'],
      consistency_score: 'number (0-100)',
      divergent_agents: ['array of 0-3 agent ids or empty'],
      anomalies_detected: ['array of 0-2 anomaly descriptions or empty'],
      corrective_action: 'none | resync | repair | rollback | null',
      duration_ms: 'number (100-10000)',
      timestamp: 'ISO timestamp',
    },
  });
  // Generate state synchronization topology
  const syncTopology = await synth.generateStructured({
    count: 1,
    schema: {
      topology_id: 'UUID',
      sync_pattern: 'star | mesh | ring | hierarchical | hybrid',
      nodes: [
        {
          node_id: 'node-{1-10}',
          role: 'primary | replica | backup | cache',
          agents_hosted: ['array of 5-10 agent ids'],
          sync_frequency_seconds: 'number (1-300)',
          replication_lag_ms: 'number (0-1000)',
          storage_capacity_gb: 'number (10-1000)',
          storage_used_gb: 'number (proportional to capacity)',
        },
      ],
      sync_protocol: 'raft | paxos | gossip | two_phase_commit',
      conflict_resolution: 'last_write_wins | merge | vector_clock | manual',
    },
  });
  console.log('State Synchronization Analysis:');
  console.log(`- State snapshots: ${stateSnapshots.data.length}`);
  console.log(`- Sync events: ${syncEvents.data.length}`);
  console.log(`- Consistency checks: ${consistencyChecks.data.length}`);
  // Guarded aggregates: the original divided by .length unconditionally,
  // printing NaN whenever a generated dataset came back empty.
  const pct = (part: number, total: number): string =>
    (total > 0 ? (part / total) * 100 : 0).toFixed(1);
  // Analyze synchronization success
  const successfulSyncs = syncEvents.data.filter((e: any) => e.success).length;
  const avgSyncTime = syncEvents.data.length > 0
    ? syncEvents.data.reduce((sum: number, e: any) => sum + e.duration_ms, 0) /
      syncEvents.data.length
    : 0;
  console.log(`\nSynchronization Metrics:`);
  console.log(`- Success rate: ${pct(successfulSyncs, syncEvents.data.length)}%`);
  console.log(`- Average sync time: ${avgSyncTime.toFixed(0)}ms`);
  // Consistency analysis
  const avgConsistency = consistencyChecks.data.length > 0
    ? consistencyChecks.data.reduce((sum: number, c: any) => sum + c.consistency_score, 0) /
      consistencyChecks.data.length
    : 0;
  console.log(`- Average consistency score: ${avgConsistency.toFixed(1)}/100`);
  console.log(`- Anomalies detected: ${consistencyChecks.data.filter((c: any) => c.anomalies_detected.length > 0).length}`);
  // Integration pattern
  console.log('\nState Sync Integration:');
  console.log('// Store state in distributed storage (Redis, etcd)');
  console.log('await redis.set(`agent:${agentId}:state`, JSON.stringify(state));');
  console.log('// Use claude-flow memory for coordination state');
  console.log('npx claude-flow@alpha hooks session-end --export-state true');
  return { stateSnapshots, syncEvents, consistencyChecks, syncTopology };
}
// ============================================================================
// Example 3: Health Check Scenarios
// ============================================================================
/**
* Generate health check and monitoring data
*/
export async function healthCheckScenarios() {
  console.log('\n💊 Example 3: Health Check Scenarios\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate individual health-check results (liveness/readiness/startup/deep).
  const healthChecks = await synth.generateEvents({
    count: 1000,
    eventTypes: [
      'health_check_passed',
      'health_check_degraded',
      'health_check_failed',
      'health_check_timeout',
    ],
    schema: {
      check_id: 'UUID',
      agent_id: 'agent-{1-80}',
      event_type: 'one of eventTypes',
      check_type: 'liveness | readiness | startup | deep',
      health_score: 'number (0-100)',
      metrics: {
        response_time_ms: 'number (1-5000)',
        cpu_usage: 'number (0-100)',
        memory_usage: 'number (0-100)',
        active_connections: 'number (0-100)',
        error_rate: 'number (0-100)',
        queue_depth: 'number (0-1000)',
      },
      issues_detected: ['array of 0-3 issue descriptions or empty'],
      recovery_actions: ['array of 0-2 actions or empty'],
      timestamp: 'ISO timestamp',
    },
    distribution: 'poisson',
  });
  // Generate per-agent health monitoring configuration (probe settings).
  const healthConfigs = await synth.generateStructured({
    count: 50,
    schema: {
      agent_id: 'agent-{1-50}',
      liveness_probe: {
        enabled: 'boolean',
        interval_seconds: 'number (5-60)',
        timeout_seconds: 'number (1-10)',
        failure_threshold: 'number (3-10)',
        success_threshold: 'number (1-3)',
      },
      readiness_probe: {
        enabled: 'boolean',
        interval_seconds: 'number (5-30)',
        timeout_seconds: 'number (1-10)',
        failure_threshold: 'number (3-10)',
      },
      health_thresholds: {
        cpu_warning: 'number (70-85)',
        cpu_critical: 'number (85-95)',
        memory_warning: 'number (70-85)',
        memory_critical: 'number (85-95)',
        error_rate_threshold: 'number (5-20)',
      },
      auto_recovery_enabled: 'boolean',
      health_status: 'healthy | degraded | unhealthy | unknown',
    },
  });
  // Generate fleet-level health time series at 1-minute resolution.
  const healthTimeSeries = await synth.generateTimeSeries({
    count: 200,
    interval: '1m',
    metrics: [
      'healthy_agents',
      'degraded_agents',
      'unhealthy_agents',
      'avg_health_score',
      'failed_checks',
      'recovery_actions',
    ],
    trend: 'stable',
  });
  // Generate auto-healing actions triggered by failing checks.
  const healingActions = await synth.generateStructured({
    count: 50,
    schema: {
      action_id: 'UUID',
      agent_id: 'agent-{1-80}',
      trigger_check_id: 'UUID (from healthChecks)',
      action_type: 'restart | reset_state | scale_resources | failover | isolate',
      severity: 'low | medium | high | critical',
      executed_at: 'ISO timestamp',
      duration_ms: 'number (100-30000)',
      success: 'boolean (85% true)',
      health_before: 'number (0-50)',
      health_after: 'number (51-100) or same as health_before',
      side_effects: ['array of 0-2 side effects or empty'],
    },
  });
  console.log('Health Check Analysis:');
  console.log(`- Health checks: ${healthChecks.data.length}`);
  console.log(`- Agent configs: ${healthConfigs.data.length}`);
  console.log(`- Time series points: ${healthTimeSeries.data.length}`);
  console.log(`- Healing actions: ${healingActions.data.length}`);
  // Safe percentage: avoids printing "NaN%" when a generated dataset is empty.
  const pct = (part: number, total: number): string =>
    (total > 0 ? (part / total) * 100 : 0).toFixed(1);
  // Analyze health check results
  const passedChecks = healthChecks.data.filter(
    (c: any) => c.event_type === 'health_check_passed'
  ).length;
  const failedChecks = healthChecks.data.filter(
    (c: any) => c.event_type === 'health_check_failed'
  ).length;
  console.log(`\nHealth Statistics:`);
  console.log(`- Passed: ${passedChecks} (${pct(passedChecks, healthChecks.data.length)}%)`);
  console.log(`- Failed: ${failedChecks} (${pct(failedChecks, healthChecks.data.length)}%)`);
  // Auto-healing effectiveness
  const successfulActions = healingActions.data.filter((a: any) => a.success);
  const successfulHealing = successfulActions.length;
  // BUG FIX: the original divided by successfulHealing unconditionally,
  // printing "+NaN" whenever no healing action succeeded.
  const avgHealthImprovement = successfulHealing > 0
    ? successfulActions.reduce(
        (sum: number, a: any) => sum + (a.health_after - a.health_before),
        0
      ) / successfulHealing
    : 0;
  console.log(`\nAuto-Healing:`);
  console.log(`- Actions taken: ${healingActions.data.length}`);
  console.log(`- Success rate: ${pct(successfulHealing, healingActions.data.length)}%`);
  console.log(`- Average health improvement: +${avgHealthImprovement.toFixed(1)}`);
  // Kubernetes health checks
  console.log('\nKubernetes Integration:');
  console.log('// Define health probes in pod spec');
  console.log('livenessProbe: { httpGet: { path: "/health", port: 8080 } }');
  console.log('readinessProbe: { httpGet: { path: "/ready", port: 8080 } }');
  return { healthChecks, healthConfigs, healthTimeSeries, healingActions };
}
// ============================================================================
// Example 4: Recovery Patterns
// ============================================================================
/**
* Generate failure recovery pattern data
*/
export async function recoveryPatterns() {
  console.log('\n🔧 Example 4: Recovery Patterns\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate failure scenarios
  const failures = await synth.generateStructured({
    count: 100,
    schema: {
      failure_id: 'UUID',
      agent_id: 'agent-{1-60}',
      failure_type: 'crash | hang | memory_leak | resource_exhaustion | network_partition | data_corruption',
      severity: 'critical | high | medium | low',
      impact_scope: 'single_agent | cluster | region | global',
      affected_tasks: ['array of 0-20 task ids'],
      data_at_risk: 'boolean',
      detected_at: 'ISO timestamp',
      detection_method: 'health_check | monitoring | user_report | self_reported',
      mttr_seconds: 'number (10-3600)',
      mttd_seconds: 'number (1-600)',
    },
  });
  // Generate one recovery strategy per recorded failure.
  const recoveryStrategies = await synth.generateStructured({
    count: failures.data.length,
    schema: {
      strategy_id: 'UUID',
      failure_id: 'UUID (from failures)',
      strategy_type: 'restart | failover | rollback | rebuild | manual_intervention',
      phases: [
        {
          phase_name: 'detection | isolation | recovery | verification | cleanup',
          actions: ['array of 2-5 actions'],
          duration_ms: 'number (100-60000)',
          success: 'boolean',
        },
      ],
      recovery_time_objective_seconds: 'number (60-3600)',
      recovery_point_objective_seconds: 'number (0-1800)',
      actual_recovery_time_seconds: 'number',
      data_loss_amount: 'number (0-1000 MB) or 0',
      automatic: 'boolean (80% true)',
      success: 'boolean (90% true)',
      lessons_learned: ['array of 2-4 lessons'],
    },
  });
  // Generate circuit breaker states
  const circuitBreakers = await synth.generateTimeSeries({
    count: 150,
    interval: '2m',
    metrics: [
      'closed_circuits',
      'open_circuits',
      'half_open_circuits',
      'total_requests',
      'failed_requests',
      'trips_per_interval',
    ],
    trend: 'mixed',
  });
  // Generate backup and restore operations
  const backupOperations = await synth.generateStructured({
    count: 200,
    schema: {
      operation_id: 'UUID',
      operation_type: 'backup | restore | verify | cleanup',
      agent_id: 'agent-{1-60}',
      backup_id: 'backup-UUID',
      data_size_mb: 'number (10-5000)',
      duration_ms: 'number (1000-300000)',
      storage_location: 'local | s3 | gcs | azure_blob',
      compression_enabled: 'boolean',
      encryption_enabled: 'boolean',
      success: 'boolean (95% true)',
      timestamp: 'ISO timestamp',
    },
  });
  console.log('Recovery Pattern Analysis:');
  console.log(`- Failures recorded: ${failures.data.length}`);
  console.log(`- Recovery strategies: ${recoveryStrategies.data.length}`);
  console.log(`- Circuit breaker metrics: ${circuitBreakers.data.length}`);
  console.log(`- Backup operations: ${backupOperations.data.length}`);
  // Guarded aggregate helpers: the original divided by .length unconditionally,
  // so an empty generated dataset printed "NaN" in the report.
  const pct = (part: number, total: number): string =>
    (total > 0 ? (part / total) * 100 : 0).toFixed(1);
  const avg = (items: any[], pick: (x: any) => number): number =>
    items.length > 0 ? items.reduce((sum, x) => sum + pick(x), 0) / items.length : 0;
  // Analyze recovery effectiveness
  const successfulRecoveries = recoveryStrategies.data.filter((s: any) => s.success).length;
  const automaticRecoveries = recoveryStrategies.data.filter((s: any) => s.automatic).length;
  console.log(`\nRecovery Effectiveness:`);
  console.log(`- Success rate: ${pct(successfulRecoveries, recoveryStrategies.data.length)}%`);
  console.log(`- Automatic recoveries: ${pct(automaticRecoveries, recoveryStrategies.data.length)}%`);
  // Calculate average recovery times
  const avgMTTR = avg(failures.data, (f: any) => f.mttr_seconds);
  const avgMTTD = avg(failures.data, (f: any) => f.mttd_seconds);
  console.log(`- Average MTTR: ${avgMTTR.toFixed(0)} seconds`);
  console.log(`- Average MTTD: ${avgMTTD.toFixed(0)} seconds`);
  // Data loss analysis
  const dataLossIncidents = recoveryStrategies.data.filter(
    (s: any) => s.data_loss_amount > 0
  ).length;
  console.log(`\nData Protection:`);
  console.log(`- Incidents with data loss: ${dataLossIncidents} (${pct(dataLossIncidents, recoveryStrategies.data.length)}%)`);
  console.log(`- Successful backups: ${backupOperations.data.filter((b: any) => b.operation_type === 'backup' && b.success).length}`);
  console.log('\nRecovery Pattern Implementation:');
  console.log('// Implement circuit breaker pattern');
  console.log('// Use claude-flow hooks for automatic recovery');
  console.log('npx claude-flow@alpha hooks enable --type "error-recovery"');
  return { failures, recoveryStrategies, circuitBreakers, backupOperations };
}
// ============================================================================
// Example 5: Version Migration
// ============================================================================
/**
 * Generate agent version migration data.
 *
 * Produces four related synthetic datasets — version metadata, migration
 * runs, canary deployments, and rollback events — then prints summary
 * statistics (migration success rate, downtime, canary decisions).
 *
 * @returns The generated datasets `{ versions, migrations, canaryDeployments, rollbacks }`.
 */
export async function versionMigration() {
  console.log('\n🔄 Example 5: Version Migration\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate version information
  const versions = await synth.generateStructured({
    count: 10,
    schema: {
      version_id: 'UUID',
      version_number: 'semantic version (e.g., 2.5.1)',
      release_date: 'ISO timestamp',
      changes: {
        features: ['array of 2-5 new features'],
        improvements: ['array of 1-4 improvements'],
        bug_fixes: ['array of 2-6 bug fixes'],
        breaking_changes: ['array of 0-2 breaking changes or empty'],
      },
      compatibility: {
        backward_compatible: 'boolean',
        migration_required: 'boolean',
        rollback_supported: 'boolean',
      },
      deployment_strategy: 'blue_green | rolling | canary | recreate',
    },
  });
  // Generate migration operations
  const migrations = await synth.generateStructured({
    count: 50,
    schema: {
      migration_id: 'UUID',
      from_version: 'semantic version',
      to_version: 'semantic version',
      agent_id: 'agent-{1-100}',
      migration_type: 'in_place | side_by_side | recreate',
      migration_steps: [
        {
          step_id: 'UUID',
          step_name: 'descriptive step name',
          step_type: 'backup | stop | migrate_data | update_code | restart | verify',
          duration_ms: 'number (100-60000)',
          success: 'boolean',
          rollback_supported: 'boolean',
        },
      ],
      downtime_ms: 'number (0-120000)',
      data_migrated_mb: 'number (0-1000)',
      overall_status: 'in_progress | completed | failed | rolled_back',
      started_at: 'ISO timestamp',
      completed_at: 'ISO timestamp or null',
    },
    constraints: [
      'Migration should have 4-8 steps',
      '85% of migrations should complete successfully',
      'Failed migrations should support rollback',
    ],
  });
  // Generate canary deployment data
  const canaryDeployments = await synth.generateStructured({
    count: 15,
    schema: {
      deployment_id: 'UUID',
      version: 'semantic version',
      canary_percentage: 'number (5-50)',
      canary_agents: ['array of agent ids'],
      stable_agents: ['array of agent ids'],
      metrics_comparison: {
        canary_error_rate: 'number (0-10)',
        stable_error_rate: 'number (0-10)',
        canary_latency_p99: 'number (10-1000)',
        stable_latency_p99: 'number (10-1000)',
        canary_throughput: 'number (100-10000)',
        stable_throughput: 'number (100-10000)',
      },
      decision: 'promote | hold | rollback',
      decision_reason: 'detailed reason',
      monitoring_duration_minutes: 'number (30-1440)',
    },
  });
  // Generate rollback events
  const rollbacks = await synth.generateStructured({
    count: 10,
    schema: {
      rollback_id: 'UUID',
      migration_id: 'UUID (from migrations)',
      trigger: 'high_error_rate | performance_degradation | manual | data_inconsistency',
      rollback_strategy: 'automated | manual',
      affected_agents: ['array of 5-30 agent ids'],
      rollback_duration_ms: 'number (5000-300000)',
      data_reverted_mb: 'number (0-1000)',
      success: 'boolean (95% true)',
      timestamp: 'ISO timestamp',
    },
  });
  console.log('Version Migration Analysis:');
  console.log(`- Versions tracked: ${versions.data.length}`);
  console.log(`- Migrations executed: ${migrations.data.length}`);
  console.log(`- Canary deployments: ${canaryDeployments.data.length}`);
  console.log(`- Rollbacks: ${rollbacks.data.length}`);
  // Analyze migration success
  const successfulMigrations = migrations.data.filter(
    (m: any) => m.overall_status === 'completed'
  ).length;
  const failedMigrations = migrations.data.filter(
    (m: any) => m.overall_status === 'failed'
  ).length;
  console.log(`\nMigration Success Rate:`);
  console.log(`- Completed: ${successfulMigrations} (${((successfulMigrations / migrations.data.length) * 100).toFixed(1)}%)`);
  console.log(`- Failed: ${failedMigrations}`);
  console.log(`- Rolled back: ${migrations.data.filter((m: any) => m.overall_status === 'rolled_back').length}`);
  // Analyze downtime.
  // Guard against division by zero: `successfulMigrations` is a filter
  // count and can legitimately be 0, which would print "NaN seconds".
  const avgDowntime = successfulMigrations > 0
    ? migrations.data
        .filter((m: any) => m.overall_status === 'completed')
        .reduce((sum: number, m: any) => sum + m.downtime_ms, 0) / successfulMigrations
    : 0;
  const zeroDowntime = migrations.data.filter((m: any) => m.downtime_ms === 0).length;
  console.log(`\nDowntime Analysis:`);
  console.log(`- Average downtime: ${(avgDowntime / 1000).toFixed(1)} seconds`);
  console.log(`- Zero-downtime migrations: ${zeroDowntime} (${((zeroDowntime / migrations.data.length) * 100).toFixed(1)}%)`);
  // Canary deployment decisions
  const promoted = canaryDeployments.data.filter((c: any) => c.decision === 'promote').length;
  const rolledBack = canaryDeployments.data.filter((c: any) => c.decision === 'rollback').length;
  console.log(`\nCanary Deployment Decisions:`);
  console.log(`- Promoted: ${promoted} (${((promoted / canaryDeployments.data.length) * 100).toFixed(1)}%)`);
  console.log(`- Rolled back: ${rolledBack} (${((rolledBack / canaryDeployments.data.length) * 100).toFixed(1)}%)`);
  console.log('\nDeployment Integration:');
  console.log('// Blue-green deployment with Kubernetes');
  console.log('kubectl apply -f deployment-v2.yaml');
  console.log('kubectl set image deployment/agents agent=image:v2');
  console.log('// Monitor and rollback if needed');
  return { versions, migrations, canaryDeployments, rollbacks };
}
// ============================================================================
// Run All Examples
// ============================================================================
/**
 * Run every agent-lifecycle example in sequence, printing a separator
 * line between each. The first error aborts the run and is rethrown so
 * callers can detect failure.
 */
export async function runAllLifecycleExamples() {
  const separator = '='.repeat(70);
  console.log('🚀 Running All Agent Lifecycle Examples\n');
  console.log(separator);
  // Ordered list of example entry points; executed strictly one at a time.
  const examples = [
    agentSpawningTermination,
    stateSynchronization,
    healthCheckScenarios,
    recoveryPatterns,
    versionMigration,
  ];
  try {
    for (const example of examples) {
      await example();
      console.log(separator);
    }
    console.log('\n✅ All agent lifecycle examples completed!\n');
  } catch (error: any) {
    console.error('❌ Error running examples:', error.message);
    throw error;
  }
}
// Run all lifecycle examples when this module is executed directly
// (e.g. `node agent-lifecycle.js`) rather than imported.
// NOTE(review): comparing import.meta.url to a hand-built "file://" string
// is fragile on Windows paths and percent-encoded paths — TODO confirm the
// supported platforms or switch to url.pathToFileURL for the comparison.
if (import.meta.url === `file://${process.argv[1]}`) {
  runAllLifecycleExamples().catch(console.error);
}

View File

@@ -0,0 +1,52 @@
/**
 * Collective Intelligence Examples
 *
 * Demonstrates swarm intelligence patterns including collaborative
 * problem-solving, knowledge sharing, emergent behavior simulation,
 * voting and consensus mechanisms, and reputation systems.
 *
 * Integrates with:
 * - claude-flow: Neural pattern recognition and learning
 * - ruv-swarm: Collective intelligence coordination
 * - AgenticDB: Distributed knowledge storage
 */
// NOTE(review): generated declaration file (tsc --declaration output for
// collective-intelligence.ts, per the sourceMappingURL below) — regenerate
// from the TypeScript source rather than editing by hand.
/**
 * Generate collaborative problem-solving session data
 */
export declare function collaborativeProblemSolving(): Promise<import("../../dist/index.js").GenerationResult<unknown>>;
/**
 * Generate knowledge sharing and transfer data
 */
export declare function knowledgeSharingPatterns(): Promise<{
    knowledgeBase: import("../../dist/index.js").GenerationResult<unknown>;
    transferEvents: import("../../dist/index.js").GenerationResult<unknown>;
    agentProfiles: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate emergent behavior patterns in swarm systems
 */
export declare function emergentBehaviorSimulation(): Promise<{
    swarmStates: import("../../dist/index.js").GenerationResult<unknown>;
    interactions: import("../../dist/index.js").GenerationResult<unknown>;
    emergentPatterns: import("../../dist/index.js").GenerationResult<unknown>;
    behaviorEvolution: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate voting and consensus mechanism data
 */
export declare function votingAndConsensusData(): Promise<{
    votingSessions: import("../../dist/index.js").GenerationResult<unknown>;
    consensusMechanisms: import("../../dist/index.js").GenerationResult<unknown>;
    votingBehavior: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate reputation and trust system data
 */
export declare function reputationSystems(): Promise<{
    reputationProfiles: import("../../dist/index.js").GenerationResult<unknown>;
    reputationEvents: import("../../dist/index.js").GenerationResult<unknown>;
    trustRelationships: import("../../dist/index.js").GenerationResult<unknown>;
    reputationChanges: import("../../dist/index.js").GenerationResult<unknown>;
}>;
export declare function runAllCollectiveIntelligenceExamples(): Promise<void>;
//# sourceMappingURL=collective-intelligence.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"collective-intelligence.d.ts","sourceRoot":"","sources":["collective-intelligence.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AASH;;GAEG;AACH,wBAAsB,2BAA2B,qEAqHhD;AAMD;;GAEG;AACH,wBAAsB,wBAAwB;;;;GAmG7C;AAMD;;GAEG;AACH,wBAAsB,0BAA0B;;;;;GA6G/C;AAMD;;GAEG;AACH,wBAAsB,sBAAsB;;;;GAmH3C;AAMD;;GAEG;AACH,wBAAsB,iBAAiB;;;;;GAqItC;AAMD,wBAAsB,oCAAoC,kBAyBzD"}

View File

@@ -0,0 +1,578 @@
"use strict";
/**
* Collective Intelligence Examples
*
* Demonstrates swarm intelligence patterns including collaborative
* problem-solving, knowledge sharing, emergent behavior simulation,
* voting and consensus mechanisms, and reputation systems.
*
* Integrates with:
* - claude-flow: Neural pattern recognition and learning
* - ruv-swarm: Collective intelligence coordination
* - AgenticDB: Distributed knowledge storage
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.collaborativeProblemSolving = collaborativeProblemSolving;
exports.knowledgeSharingPatterns = knowledgeSharingPatterns;
exports.emergentBehaviorSimulation = emergentBehaviorSimulation;
exports.votingAndConsensusData = votingAndConsensusData;
exports.reputationSystems = reputationSystems;
exports.runAllCollectiveIntelligenceExamples = runAllCollectiveIntelligenceExamples;
const index_js_1 = require("../../dist/index.js");
// ============================================================================
// Example 1: Collaborative Problem-Solving
// ============================================================================
/**
 * Generate collaborative problem-solving session data.
 *
 * Generates 30 synthetic problem-solving sessions (participants, proposals,
 * collaboration events, quality metrics), prints summary statistics, and
 * returns the generation result for further processing.
 */
async function collaborativeProblemSolving() {
    console.log('\n🧩 Example 1: Collaborative Problem-Solving\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
        cacheStrategy: 'memory',
    });
    // Generate problem-solving sessions
    const sessions = await synth.generateStructured({
        count: 30,
        schema: {
            session_id: 'UUID',
            problem: {
                id: 'UUID',
                title: 'complex problem title',
                description: 'detailed problem description',
                domain: 'software_architecture | data_analysis | optimization | debugging | design',
                complexity: 'number (1-10)',
                estimated_time_hours: 'number (1-48)',
            },
            participating_agents: [
                {
                    agent_id: 'agent-{1-20}',
                    role: 'researcher | analyst | implementer | reviewer | facilitator',
                    expertise: ['array of 2-4 expertise areas'],
                    contribution_count: 'number (0-50)',
                    quality_score: 'number (0-100)',
                },
            ],
            solution_proposals: [
                {
                    proposal_id: 'UUID',
                    proposer_agent_id: 'agent id from participants',
                    approach: 'detailed solution approach',
                    estimated_effort: 'number (1-40 hours)',
                    pros: ['array of 2-4 advantages'],
                    cons: ['array of 1-3 disadvantages'],
                    votes_for: 'number (0-20)',
                    votes_against: 'number (0-20)',
                    feasibility_score: 'number (0-100)',
                },
            ],
            collaboration_events: [
                {
                    event_id: 'UUID',
                    event_type: 'proposal | critique | enhancement | agreement | disagreement',
                    agent_id: 'agent id',
                    content: 'event description',
                    timestamp: 'ISO timestamp',
                    references: ['array of related event_ids or empty'],
                },
            ],
            selected_solution_id: 'UUID (from proposals)',
            outcome: 'successful | partial | failed | ongoing',
            actual_time_hours: 'number',
            quality_metrics: {
                solution_quality: 'number (0-100)',
                collaboration_efficiency: 'number (0-100)',
                innovation_score: 'number (0-100)',
                consensus_level: 'number (0-100)',
            },
            started_at: 'ISO timestamp',
            completed_at: 'ISO timestamp or null',
        },
        constraints: [
            'Sessions should have 3-8 participating agents',
            'Should have 2-5 solution proposals per session',
            '70% of sessions should be successful',
            'Higher agent expertise should correlate with better outcomes',
        ],
    });
    // Analyze collaborative sessions
    const successfulSessions = sessions.data.filter((s) => s.outcome === 'successful');
    const avgParticipants = sessions.data.reduce((sum, s) => sum + s.participating_agents.length, 0) / sessions.data.length;
    console.log('Collaborative Problem-Solving Analysis:');
    console.log(`- Total sessions: ${sessions.data.length}`);
    console.log(`- Successful: ${successfulSessions.length} (${((successfulSessions.length / sessions.data.length) * 100).toFixed(1)}%)`);
    console.log(`- Average participants: ${avgParticipants.toFixed(1)}`);
    // Calculate quality metrics.
    // Guard against division by zero: `successfulSessions` is filter-derived
    // and may be empty, which would have printed "NaN/100".
    const successCount = successfulSessions.length;
    const avgQuality = successCount > 0
        ? successfulSessions.reduce((sum, s) => sum + s.quality_metrics.solution_quality, 0) / successCount
        : 0;
    const avgInnovation = successCount > 0
        ? successfulSessions.reduce((sum, s) => sum + s.quality_metrics.innovation_score, 0) / successCount
        : 0;
    console.log(`\nQuality Metrics:`);
    console.log(`- Average solution quality: ${avgQuality.toFixed(1)}/100`);
    console.log(`- Average innovation score: ${avgInnovation.toFixed(1)}/100`);
    // Domain distribution
    const domains = new Map();
    sessions.data.forEach((s) => {
        domains.set(s.problem.domain, (domains.get(s.problem.domain) || 0) + 1);
    });
    console.log('\nProblem Domain Distribution:');
    domains.forEach((count, domain) => {
        console.log(`- ${domain}: ${count}`);
    });
    // Claude-Flow integration
    console.log('\nClaude-Flow Neural Integration:');
    console.log('npx claude-flow@alpha hooks neural-train --pattern "collaboration"');
    console.log('// Store successful patterns in AgenticDB for learning');
    return sessions;
}
// ============================================================================
// Example 2: Knowledge Sharing Patterns
// ============================================================================
/**
* Generate knowledge sharing and transfer data
*/
async function knowledgeSharingPatterns() {
    // NOTE(review): compiled CommonJS output of collective-intelligence.ts —
    // prefer editing the TypeScript source and rebuilding.
    console.log('\n📚 Example 2: Knowledge Sharing Patterns\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate knowledge base entries (schemas are prompt strings interpreted
    // by the generator, not validated types).
    const knowledgeBase = await synth.generateStructured({
        count: 200,
        schema: {
            entry_id: 'UUID',
            category: 'best_practice | lesson_learned | solution_pattern | troubleshooting | architecture',
            title: 'descriptive title',
            content: 'detailed knowledge content (2-4 paragraphs)',
            tags: ['array of 3-6 tags'],
            author_agent_id: 'agent-{1-50}',
            contributors: ['array of 0-5 agent ids'],
            related_entries: ['array of 0-3 entry_ids or empty'],
            quality_rating: 'number (0-5.0)',
            usefulness_count: 'number (0-100)',
            view_count: 'number (0-1000)',
            created_at: 'ISO timestamp',
            updated_at: 'ISO timestamp',
        },
    });
    // Generate knowledge transfer events
    const transferEvents = await synth.generateEvents({
        count: 500,
        eventTypes: [
            'knowledge_created',
            'knowledge_shared',
            'knowledge_applied',
            'knowledge_validated',
            'knowledge_updated',
        ],
        schema: {
            event_id: 'UUID',
            event_type: 'one of eventTypes',
            knowledge_entry_id: 'UUID (from knowledgeBase)',
            source_agent_id: 'agent-{1-50}',
            target_agent_id: 'agent-{1-50} or null',
            context: 'description of usage context',
            effectiveness: 'number (0-100)',
            timestamp: 'ISO timestamp',
        },
        distribution: 'uniform',
    });
    // Generate agent knowledge profiles
    const agentProfiles = await synth.generateStructured({
        count: 50,
        schema: {
            agent_id: 'agent-{1-50}',
            expertise_areas: ['array of 3-8 expertise domains'],
            knowledge_contributed: 'number (0-20)',
            knowledge_consumed: 'number (0-100)',
            sharing_frequency: 'high | medium | low',
            learning_rate: 'number (0-1.0)',
            collaboration_score: 'number (0-100)',
            influence_score: 'number (0-100)',
        },
    });
    console.log('Knowledge Sharing Analysis:');
    console.log(`- Knowledge entries: ${knowledgeBase.data.length}`);
    console.log(`- Transfer events: ${transferEvents.data.length}`);
    console.log(`- Agent profiles: ${agentProfiles.data.length}`);
    // Analyze knowledge distribution
    const categoryCount = new Map();
    knowledgeBase.data.forEach((entry) => {
        categoryCount.set(entry.category, (categoryCount.get(entry.category) || 0) + 1);
    });
    console.log('\nKnowledge Categories:');
    categoryCount.forEach((count, category) => {
        console.log(`- ${category}: ${count}`);
    });
    // Calculate sharing metrics
    // NOTE(review): assumes knowledgeBase.data is non-empty — would print NaN
    // for the average if generation returned no rows; TODO confirm.
    const avgRating = knowledgeBase.data.reduce((sum, entry) => sum + entry.quality_rating, 0) / knowledgeBase.data.length;
    console.log(`\nSharing Metrics:`);
    console.log(`- Average quality rating: ${avgRating.toFixed(2)}/5.0`);
    console.log(`- High sharers: ${agentProfiles.data.filter((a) => a.sharing_frequency === 'high').length}`);
    // AgenticDB integration
    console.log('\nAgenticDB Integration:');
    console.log('// Store knowledge embeddings for semantic search');
    console.log('await agenticDB.storeVector({ text: knowledge.content, metadata: {...} });');
    console.log('// Query similar knowledge: agenticDB.search({ query, topK: 10 });');
    return { knowledgeBase, transferEvents, agentProfiles };
}
// ============================================================================
// Example 3: Emergent Behavior Simulation
// ============================================================================
/**
* Generate emergent behavior patterns in swarm systems
*/
async function emergentBehaviorSimulation() {
    // NOTE(review): compiled CommonJS output of collective-intelligence.ts —
    // prefer editing the TypeScript source and rebuilding.
    console.log('\n🌀 Example 3: Emergent Behavior Simulation\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate swarm state evolution (time series at 1-minute intervals)
    const swarmStates = await synth.generateTimeSeries({
        count: 100,
        interval: '1m',
        metrics: [
            'agent_count',
            'cluster_count',
            'avg_cluster_size',
            'coordination_level',
            'task_completion_rate',
            'communication_density',
            'adaptation_score',
        ],
        trend: 'up',
        seasonality: false,
    });
    // Generate agent interactions (constraints are prompt hints for the
    // generator, not enforced invariants)
    const interactions = await synth.generateStructured({
        count: 1000,
        schema: {
            interaction_id: 'UUID',
            agent_a_id: 'agent-{1-100}',
            agent_b_id: 'agent-{1-100}',
            interaction_type: 'cooperation | competition | information_exchange | resource_sharing | conflict_resolution',
            context: 'brief description of interaction',
            outcome: 'positive | negative | neutral',
            influence_on_behavior: 'number (-1.0 to 1.0)',
            timestamp: 'ISO timestamp',
        },
        constraints: [
            'agent_a_id should be different from agent_b_id',
            '70% of interactions should be positive',
            'Cooperation should be more common than competition',
        ],
    });
    // Generate emergent patterns
    const emergentPatterns = await synth.generateStructured({
        count: 20,
        schema: {
            pattern_id: 'UUID',
            pattern_type: 'clustering | leader_emergence | task_specialization | self_organization | collective_decision',
            description: 'detailed pattern description',
            participants: ['array of 5-30 agent ids'],
            emergence_time: 'ISO timestamp',
            stability_score: 'number (0-100)',
            efficiency_gain: 'number (0-50 percent)',
            conditions: ['array of 2-4 conditions that led to emergence'],
            observed_behaviors: ['array of 3-6 behaviors'],
        },
    });
    // Generate agent behavior evolution
    const behaviorEvolution = await synth.generateStructured({
        count: 300,
        schema: {
            agent_id: 'agent-{1-100}',
            timestamp: 'ISO timestamp',
            behavior_traits: {
                cooperation_tendency: 'number (0-1.0)',
                exploration_vs_exploitation: 'number (0-1.0)',
                risk_tolerance: 'number (0-1.0)',
                social_connectivity: 'number (0-1.0)',
                task_focus: 'generalist | specialist',
            },
            influenced_by: ['array of 0-3 agent ids or empty'],
            role_in_swarm: 'leader | follower | bridge | isolate | specialist',
            performance_score: 'number (0-100)',
        },
    });
    console.log('Emergent Behavior Analysis:');
    console.log(`- Swarm state snapshots: ${swarmStates.data.length}`);
    console.log(`- Agent interactions: ${interactions.data.length}`);
    console.log(`- Emergent patterns: ${emergentPatterns.data.length}`);
    console.log(`- Behavior evolution points: ${behaviorEvolution.data.length}`);
    // Analyze interaction types (count occurrences per interaction_type)
    const interactionTypes = new Map();
    interactions.data.forEach((i) => {
        interactionTypes.set(i.interaction_type, (interactionTypes.get(i.interaction_type) || 0) + 1);
    });
    console.log('\nInteraction Distribution:');
    interactionTypes.forEach((count, type) => {
        console.log(`- ${type}: ${count} (${((count / interactions.data.length) * 100).toFixed(1)}%)`);
    });
    // Pattern analysis
    console.log('\nEmergent Patterns:');
    emergentPatterns.data.forEach((pattern) => {
        console.log(`- ${pattern.pattern_type}: ${pattern.participants.length} participants, stability ${pattern.stability_score}/100`);
    });
    // Ruv-Swarm collective intelligence
    console.log('\nRuv-Swarm Collective Intelligence:');
    console.log('npx ruv-swarm mcp start');
    console.log('// MCP: neural_patterns to analyze emergent behaviors');
    return { swarmStates, interactions, emergentPatterns, behaviorEvolution };
}
// ============================================================================
// Example 4: Voting and Consensus Data
// ============================================================================
/**
* Generate voting and consensus mechanism data
*/
async function votingAndConsensusData() {
    // NOTE(review): compiled CommonJS output of collective-intelligence.ts —
    // prefer editing the TypeScript source and rebuilding.
    console.log('\n🗳 Example 4: Voting and Consensus Mechanisms\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate voting sessions (topic, eligible voters, individual votes,
    // and the aggregated result per session)
    const votingSessions = await synth.generateStructured({
        count: 50,
        schema: {
            session_id: 'UUID',
            voting_method: 'simple_majority | qualified_majority | unanimous | weighted | ranked_choice',
            topic: {
                id: 'UUID',
                title: 'decision topic',
                description: 'topic description',
                importance: 'critical | high | medium | low',
            },
            eligible_voters: ['array of 10-50 agent ids'],
            votes: [
                {
                    voter_id: 'agent id from eligible_voters',
                    vote_value: 'for | against | abstain',
                    weight: 'number (1.0-5.0)',
                    reasoning: 'brief explanation',
                    confidence: 'number (0-100)',
                    cast_at: 'ISO timestamp',
                },
            ],
            result: {
                decision: 'accepted | rejected | tie',
                votes_for: 'number',
                votes_against: 'number',
                votes_abstain: 'number',
                weighted_score: 'number',
                participation_rate: 'number (0-100)',
                consensus_level: 'number (0-100)',
            },
            duration_minutes: 'number (5-180)',
            started_at: 'ISO timestamp',
            completed_at: 'ISO timestamp',
        },
        constraints: [
            'Votes array should match eligible_voters count',
            'Critical topics should have higher participation',
            'Weighted votes should affect weighted_score',
        ],
    });
    // Generate consensus mechanisms (multi-round deliberation records
    // referencing the sessions above)
    const consensusMechanisms = await synth.generateStructured({
        count: 100,
        schema: {
            mechanism_id: 'UUID',
            mechanism_type: 'deliberative | aggregative | iterative | delegative',
            session_id: 'UUID (from votingSessions)',
            rounds: [
                {
                    round_number: 'number (1-5)',
                    proposals: ['array of 2-5 proposal descriptions'],
                    discussions: 'number (10-100)',
                    opinion_shifts: 'number (0-20)',
                    convergence_score: 'number (0-100)',
                    duration_minutes: 'number (5-60)',
                },
            ],
            final_consensus: 'strong | moderate | weak | none',
            compromises_made: 'number (0-5)',
            dissenting_opinions: ['array of 0-3 dissenting viewpoints or empty'],
        },
    });
    // Generate agent voting behavior profiles
    const votingBehavior = await synth.generateStructured({
        count: 200,
        schema: {
            agent_id: 'agent-{1-50}',
            total_votes: 'number (0-50)',
            voting_pattern: 'consistent | moderate | swing',
            influence_level: 'high | medium | low',
            consensus_seeking: 'number (0-100)',
            independence_score: 'number (0-100)',
            expertise_alignment: 'number (0-100)',
        },
    });
    console.log('Voting and Consensus Analysis:');
    console.log(`- Voting sessions: ${votingSessions.data.length}`);
    console.log(`- Consensus mechanisms: ${consensusMechanisms.data.length}`);
    console.log(`- Agent behaviors: ${votingBehavior.data.length}`);
    // Analyze voting outcomes
    // NOTE(review): assumes non-empty data arrays — averages/percentages
    // would print NaN if generation returned no rows; TODO confirm.
    const acceptedCount = votingSessions.data.filter((s) => s.result.decision === 'accepted').length;
    const avgParticipation = votingSessions.data.reduce((sum, s) => sum + s.result.participation_rate, 0) / votingSessions.data.length;
    console.log(`\nVoting Outcomes:`);
    console.log(`- Accepted: ${acceptedCount} (${((acceptedCount / votingSessions.data.length) * 100).toFixed(1)}%)`);
    console.log(`- Average participation: ${avgParticipation.toFixed(1)}%`);
    // Consensus strength
    const strongConsensus = consensusMechanisms.data.filter((m) => m.final_consensus === 'strong').length;
    console.log(`\nConsensus Quality:`);
    console.log(`- Strong consensus: ${strongConsensus} (${((strongConsensus / consensusMechanisms.data.length) * 100).toFixed(1)}%)`);
    return { votingSessions, consensusMechanisms, votingBehavior };
}
// ============================================================================
// Example 5: Reputation Systems
// ============================================================================
/**
* Generate reputation and trust system data
*/
async function reputationSystems() {
    // NOTE(review): compiled CommonJS output of collective-intelligence.ts —
    // prefer editing the TypeScript source and rebuilding.
    console.log('\n⭐ Example 5: Reputation and Trust Systems\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate agent reputation profiles
    const reputationProfiles = await synth.generateStructured({
        count: 100,
        schema: {
            agent_id: 'agent-{1-100}',
            overall_reputation: 'number (0-100)',
            reputation_components: {
                reliability: 'number (0-100)',
                expertise: 'number (0-100)',
                collaboration: 'number (0-100)',
                responsiveness: 'number (0-100)',
                quality: 'number (0-100)',
            },
            trust_score: 'number (0-100)',
            endorsements: 'number (0-50)',
            negative_feedback: 'number (0-20)',
            tasks_completed: 'number (0-1000)',
            success_rate: 'number (0-100)',
            tenure_days: 'number (1-730)',
            reputation_trend: 'rising | stable | declining',
            badges: ['array of 0-5 achievement badges or empty'],
        },
        constraints: [
            'Overall reputation should correlate with component scores',
            'Higher success rate should correlate with higher reputation',
            'Trust score should factor in tenure and feedback',
        ],
    });
    // Generate reputation events (poisson-distributed event stream)
    const reputationEvents = await synth.generateEvents({
        count: 500,
        eventTypes: [
            'endorsement_received',
            'feedback_positive',
            'feedback_negative',
            'task_success',
            'task_failure',
            'badge_earned',
            'collaboration_rated',
        ],
        schema: {
            event_id: 'UUID',
            event_type: 'one of eventTypes',
            subject_agent_id: 'agent-{1-100}',
            evaluator_agent_id: 'agent-{1-100} or null',
            impact: 'number (-10 to +10)',
            context: 'brief description',
            evidence: 'supporting evidence description or null',
            timestamp: 'ISO timestamp',
        },
        distribution: 'poisson',
    });
    // Generate trust relationships (directed trustor -> trustee edges)
    const trustRelationships = await synth.generateStructured({
        count: 300,
        schema: {
            relationship_id: 'UUID',
            trustor_agent_id: 'agent-{1-100}',
            trustee_agent_id: 'agent-{1-100}',
            trust_level: 'number (0-100)',
            relationship_type: 'direct | transitive | institutional',
            interaction_count: 'number (1-100)',
            successful_interactions: 'number (proportional to interaction_count)',
            last_interaction: 'ISO timestamp',
            trust_evolution: 'building | established | declining',
        },
        constraints: [
            'trustor_agent_id should be different from trustee_agent_id',
            'Trust level should correlate with success rate',
            'More interactions should increase trust stability',
        ],
    });
    // Generate reputation decay and recovery (daily time series)
    const reputationChanges = await synth.generateTimeSeries({
        count: 200,
        interval: '1d',
        metrics: [
            'avg_reputation',
            'reputation_variance',
            'positive_events',
            'negative_events',
            'trust_network_density',
            'endorsement_rate',
        ],
        trend: 'stable',
    });
    console.log('Reputation System Analysis:');
    console.log(`- Agent profiles: ${reputationProfiles.data.length}`);
    console.log(`- Reputation events: ${reputationEvents.data.length}`);
    console.log(`- Trust relationships: ${trustRelationships.data.length}`);
    console.log(`- Time series points: ${reputationChanges.data.length}`);
    // Analyze reputation distribution (bucketed by overall_reputation)
    const highReputation = reputationProfiles.data.filter((p) => p.overall_reputation >= 80).length;
    const lowReputation = reputationProfiles.data.filter((p) => p.overall_reputation < 40).length;
    console.log(`\nReputation Distribution:`);
    console.log(`- High reputation (≥80): ${highReputation} (${((highReputation / reputationProfiles.data.length) * 100).toFixed(1)}%)`);
    console.log(`- Low reputation (<40): ${lowReputation} (${((lowReputation / reputationProfiles.data.length) * 100).toFixed(1)}%)`);
    // Trust network analysis
    // NOTE(review): assumes trustRelationships.data is non-empty — the average
    // would print NaN otherwise; TODO confirm.
    const avgTrustLevel = trustRelationships.data.reduce((sum, r) => sum + r.trust_level, 0) / trustRelationships.data.length;
    console.log(`\nTrust Network:`);
    console.log(`- Average trust level: ${avgTrustLevel.toFixed(1)}/100`);
    console.log(`- Established relationships: ${trustRelationships.data.filter((r) => r.trust_evolution === 'established').length}`);
    // Integration with reputation tracking
    console.log('\nReputation Tracking Integration:');
    console.log('// Store reputation events in AgenticDB');
    console.log('// Use claude-flow hooks to update reputation after tasks');
    console.log('npx claude-flow@alpha hooks post-task --update-reputation true');
    return { reputationProfiles, reputationEvents, trustRelationships, reputationChanges };
}
// ============================================================================
// Run All Examples
// ============================================================================
async function runAllCollectiveIntelligenceExamples() {
    // Runs each example sequentially with a separator line between them;
    // the first failure aborts the run and is rethrown to the caller.
    console.log('🚀 Running All Collective Intelligence Examples\n');
    console.log('='.repeat(70));
    try {
        await collaborativeProblemSolving();
        console.log('='.repeat(70));
        await knowledgeSharingPatterns();
        console.log('='.repeat(70));
        await emergentBehaviorSimulation();
        console.log('='.repeat(70));
        await votingAndConsensusData();
        console.log('='.repeat(70));
        await reputationSystems();
        console.log('='.repeat(70));
        console.log('\n✅ All collective intelligence examples completed!\n');
    }
    catch (error) {
        console.error('❌ Error running examples:', error.message);
        throw error;
    }
}
// Run all examples when this file is executed directly (not require()d).
// This is a CommonJS module ("use strict" + exports.* + require above), and
// `import.meta` is a SyntaxError inside CJS — the original guard would make
// the whole file unloadable. Use the standard require.main check instead.
if (require.main === module) {
    runAllCollectiveIntelligenceExamples().catch(console.error);
}
//# sourceMappingURL=collective-intelligence.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,669 @@
/**
* Collective Intelligence Examples
*
* Demonstrates swarm intelligence patterns including collaborative
* problem-solving, knowledge sharing, emergent behavior simulation,
* voting and consensus mechanisms, and reputation systems.
*
* Integrates with:
* - claude-flow: Neural pattern recognition and learning
* - ruv-swarm: Collective intelligence coordination
* - AgenticDB: Distributed knowledge storage
*/
import { AgenticSynth, createSynth } from '../../dist/index.js';
import type { GenerationResult } from '../../src/types.js';
// ============================================================================
// Example 1: Collaborative Problem-Solving
// ============================================================================
/**
 * Generate collaborative problem-solving session data.
 *
 * Synthesizes 30 multi-agent problem-solving sessions (problem definition,
 * participants, solution proposals, collaboration events, quality metrics),
 * then prints a summary: success rate, average participants, quality
 * metrics over successful sessions, and problem-domain distribution.
 *
 * Fix: the average-quality/innovation computations previously divided by
 * `successfulSessions.length` unguarded. The "70% successful" constraint is
 * only a *request* to the generator, so zero successful sessions is
 * possible and produced NaN in the printed report. Both averages are now
 * guarded and report 0 for an empty set.
 *
 * @returns The generation result containing the synthesized sessions.
 */
export async function collaborativeProblemSolving() {
  console.log('\n🧩 Example 1: Collaborative Problem-Solving\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    cacheStrategy: 'memory',
  });
  // Generate problem-solving sessions
  const sessions = await synth.generateStructured({
    count: 30,
    schema: {
      session_id: 'UUID',
      problem: {
        id: 'UUID',
        title: 'complex problem title',
        description: 'detailed problem description',
        domain: 'software_architecture | data_analysis | optimization | debugging | design',
        complexity: 'number (1-10)',
        estimated_time_hours: 'number (1-48)',
      },
      participating_agents: [
        {
          agent_id: 'agent-{1-20}',
          role: 'researcher | analyst | implementer | reviewer | facilitator',
          expertise: ['array of 2-4 expertise areas'],
          contribution_count: 'number (0-50)',
          quality_score: 'number (0-100)',
        },
      ],
      solution_proposals: [
        {
          proposal_id: 'UUID',
          proposer_agent_id: 'agent id from participants',
          approach: 'detailed solution approach',
          estimated_effort: 'number (1-40 hours)',
          pros: ['array of 2-4 advantages'],
          cons: ['array of 1-3 disadvantages'],
          votes_for: 'number (0-20)',
          votes_against: 'number (0-20)',
          feasibility_score: 'number (0-100)',
        },
      ],
      collaboration_events: [
        {
          event_id: 'UUID',
          event_type: 'proposal | critique | enhancement | agreement | disagreement',
          agent_id: 'agent id',
          content: 'event description',
          timestamp: 'ISO timestamp',
          references: ['array of related event_ids or empty'],
        },
      ],
      selected_solution_id: 'UUID (from proposals)',
      outcome: 'successful | partial | failed | ongoing',
      actual_time_hours: 'number',
      quality_metrics: {
        solution_quality: 'number (0-100)',
        collaboration_efficiency: 'number (0-100)',
        innovation_score: 'number (0-100)',
        consensus_level: 'number (0-100)',
      },
      started_at: 'ISO timestamp',
      completed_at: 'ISO timestamp or null',
    },
    constraints: [
      'Sessions should have 3-8 participating agents',
      'Should have 2-5 solution proposals per session',
      '70% of sessions should be successful',
      'Higher agent expertise should correlate with better outcomes',
    ],
  });
  // Analyze collaborative sessions
  const successfulSessions = sessions.data.filter((s: any) => s.outcome === 'successful');
  const avgParticipants = sessions.data.reduce(
    (sum: number, s: any) => sum + s.participating_agents.length,
    0
  ) / sessions.data.length;
  console.log('Collaborative Problem-Solving Analysis:');
  console.log(`- Total sessions: ${sessions.data.length}`);
  console.log(`- Successful: ${successfulSessions.length} (${((successfulSessions.length / sessions.data.length) * 100).toFixed(1)}%)`);
  console.log(`- Average participants: ${avgParticipants.toFixed(1)}`);
  // Calculate quality metrics over successful sessions only.
  // Guarded: an empty successfulSessions array previously yielded NaN.
  const avgQuality = successfulSessions.length > 0
    ? successfulSessions.reduce(
        (sum: number, s: any) => sum + s.quality_metrics.solution_quality,
        0
      ) / successfulSessions.length
    : 0;
  const avgInnovation = successfulSessions.length > 0
    ? successfulSessions.reduce(
        (sum: number, s: any) => sum + s.quality_metrics.innovation_score,
        0
      ) / successfulSessions.length
    : 0;
  console.log(`\nQuality Metrics:`);
  console.log(`- Average solution quality: ${avgQuality.toFixed(1)}/100`);
  console.log(`- Average innovation score: ${avgInnovation.toFixed(1)}/100`);
  // Domain distribution
  const domains = new Map<string, number>();
  sessions.data.forEach((s: any) => {
    domains.set(s.problem.domain, (domains.get(s.problem.domain) || 0) + 1);
  });
  console.log('\nProblem Domain Distribution:');
  domains.forEach((count, domain) => {
    console.log(`- ${domain}: ${count}`);
  });
  // Claude-Flow integration
  console.log('\nClaude-Flow Neural Integration:');
  console.log('npx claude-flow@alpha hooks neural-train --pattern "collaboration"');
  console.log('// Store successful patterns in AgenticDB for learning');
  return sessions;
}
// ============================================================================
// Example 2: Knowledge Sharing Patterns
// ============================================================================
/**
 * Generate knowledge sharing and transfer data.
 *
 * Synthesizes a knowledge base (200 entries), knowledge-transfer events
 * (500, uniform distribution) and per-agent knowledge profiles (50), then
 * prints category distribution and sharing metrics.
 *
 * @returns `{ knowledgeBase, transferEvents, agentProfiles }` — the three
 *          generation results.
 */
export async function knowledgeSharingPatterns() {
  console.log('\n📚 Example 2: Knowledge Sharing Patterns\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate knowledge base entries (values are natural-language prompts
  // telling the generator what to synthesize for each field).
  const knowledgeBase = await synth.generateStructured({
    count: 200,
    schema: {
      entry_id: 'UUID',
      category: 'best_practice | lesson_learned | solution_pattern | troubleshooting | architecture',
      title: 'descriptive title',
      content: 'detailed knowledge content (2-4 paragraphs)',
      tags: ['array of 3-6 tags'],
      author_agent_id: 'agent-{1-50}',
      contributors: ['array of 0-5 agent ids'],
      related_entries: ['array of 0-3 entry_ids or empty'],
      quality_rating: 'number (0-5.0)',
      usefulness_count: 'number (0-100)',
      view_count: 'number (0-1000)',
      created_at: 'ISO timestamp',
      updated_at: 'ISO timestamp',
    },
  });
  // Generate knowledge transfer events
  const transferEvents = await synth.generateEvents({
    count: 500,
    eventTypes: [
      'knowledge_created',
      'knowledge_shared',
      'knowledge_applied',
      'knowledge_validated',
      'knowledge_updated',
    ],
    schema: {
      event_id: 'UUID',
      event_type: 'one of eventTypes',
      knowledge_entry_id: 'UUID (from knowledgeBase)',
      source_agent_id: 'agent-{1-50}',
      target_agent_id: 'agent-{1-50} or null',
      context: 'description of usage context',
      effectiveness: 'number (0-100)',
      timestamp: 'ISO timestamp',
    },
    distribution: 'uniform',
  });
  // Generate agent knowledge profiles
  const agentProfiles = await synth.generateStructured({
    count: 50,
    schema: {
      agent_id: 'agent-{1-50}',
      expertise_areas: ['array of 3-8 expertise domains'],
      knowledge_contributed: 'number (0-20)',
      knowledge_consumed: 'number (0-100)',
      sharing_frequency: 'high | medium | low',
      learning_rate: 'number (0-1.0)',
      collaboration_score: 'number (0-100)',
      influence_score: 'number (0-100)',
    },
  });
  console.log('Knowledge Sharing Analysis:');
  console.log(`- Knowledge entries: ${knowledgeBase.data.length}`);
  console.log(`- Transfer events: ${transferEvents.data.length}`);
  console.log(`- Agent profiles: ${agentProfiles.data.length}`);
  // Analyze knowledge distribution: count entries per category.
  const categoryCount = new Map<string, number>();
  knowledgeBase.data.forEach((entry: any) => {
    categoryCount.set(entry.category, (categoryCount.get(entry.category) || 0) + 1);
  });
  console.log('\nKnowledge Categories:');
  categoryCount.forEach((count, category) => {
    console.log(`- ${category}: ${count}`);
  });
  // Calculate sharing metrics.
  // NOTE(review): divides by knowledgeBase.data.length — assumes the
  // generator never returns an empty batch for count: 200; confirm.
  const avgRating = knowledgeBase.data.reduce(
    (sum: number, entry: any) => sum + entry.quality_rating,
    0
  ) / knowledgeBase.data.length;
  console.log(`\nSharing Metrics:`);
  console.log(`- Average quality rating: ${avgRating.toFixed(2)}/5.0`);
  console.log(`- High sharers: ${agentProfiles.data.filter((a: any) => a.sharing_frequency === 'high').length}`);
  // AgenticDB integration (illustrative snippets printed, not executed).
  console.log('\nAgenticDB Integration:');
  console.log('// Store knowledge embeddings for semantic search');
  console.log('await agenticDB.storeVector({ text: knowledge.content, metadata: {...} });');
  console.log('// Query similar knowledge: agenticDB.search({ query, topK: 10 });');
  return { knowledgeBase, transferEvents, agentProfiles };
}
// ============================================================================
// Example 3: Emergent Behavior Simulation
// ============================================================================
/**
 * Generate emergent behavior patterns in swarm systems.
 *
 * Synthesizes four related datasets: swarm-state time series (100 points at
 * 1-minute intervals), 1000 pairwise agent interactions, 20 emergent
 * patterns, and 300 per-agent behavior-evolution snapshots. Prints an
 * interaction-type distribution and a per-pattern summary.
 *
 * @returns `{ swarmStates, interactions, emergentPatterns, behaviorEvolution }`.
 */
export async function emergentBehaviorSimulation() {
  console.log('\n🌀 Example 3: Emergent Behavior Simulation\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate swarm state evolution (upward trend, no seasonality).
  const swarmStates = await synth.generateTimeSeries({
    count: 100,
    interval: '1m',
    metrics: [
      'agent_count',
      'cluster_count',
      'avg_cluster_size',
      'coordination_level',
      'task_completion_rate',
      'communication_density',
      'adaptation_score',
    ],
    trend: 'up',
    seasonality: false,
  });
  // Generate agent interactions; constraints below steer (but do not
  // guarantee) the generated distribution.
  const interactions = await synth.generateStructured({
    count: 1000,
    schema: {
      interaction_id: 'UUID',
      agent_a_id: 'agent-{1-100}',
      agent_b_id: 'agent-{1-100}',
      interaction_type: 'cooperation | competition | information_exchange | resource_sharing | conflict_resolution',
      context: 'brief description of interaction',
      outcome: 'positive | negative | neutral',
      influence_on_behavior: 'number (-1.0 to 1.0)',
      timestamp: 'ISO timestamp',
    },
    constraints: [
      'agent_a_id should be different from agent_b_id',
      '70% of interactions should be positive',
      'Cooperation should be more common than competition',
    ],
  });
  // Generate emergent patterns
  const emergentPatterns = await synth.generateStructured({
    count: 20,
    schema: {
      pattern_id: 'UUID',
      pattern_type: 'clustering | leader_emergence | task_specialization | self_organization | collective_decision',
      description: 'detailed pattern description',
      participants: ['array of 5-30 agent ids'],
      emergence_time: 'ISO timestamp',
      stability_score: 'number (0-100)',
      efficiency_gain: 'number (0-50 percent)',
      conditions: ['array of 2-4 conditions that led to emergence'],
      observed_behaviors: ['array of 3-6 behaviors'],
    },
  });
  // Generate agent behavior evolution
  const behaviorEvolution = await synth.generateStructured({
    count: 300,
    schema: {
      agent_id: 'agent-{1-100}',
      timestamp: 'ISO timestamp',
      behavior_traits: {
        cooperation_tendency: 'number (0-1.0)',
        exploration_vs_exploitation: 'number (0-1.0)',
        risk_tolerance: 'number (0-1.0)',
        social_connectivity: 'number (0-1.0)',
        task_focus: 'generalist | specialist',
      },
      influenced_by: ['array of 0-3 agent ids or empty'],
      role_in_swarm: 'leader | follower | bridge | isolate | specialist',
      performance_score: 'number (0-100)',
    },
  });
  console.log('Emergent Behavior Analysis:');
  console.log(`- Swarm state snapshots: ${swarmStates.data.length}`);
  console.log(`- Agent interactions: ${interactions.data.length}`);
  console.log(`- Emergent patterns: ${emergentPatterns.data.length}`);
  console.log(`- Behavior evolution points: ${behaviorEvolution.data.length}`);
  // Analyze interaction types: count occurrences per type.
  const interactionTypes = new Map<string, number>();
  interactions.data.forEach((i: any) => {
    interactionTypes.set(i.interaction_type, (interactionTypes.get(i.interaction_type) || 0) + 1);
  });
  console.log('\nInteraction Distribution:');
  interactionTypes.forEach((count, type) => {
    console.log(`- ${type}: ${count} (${((count / interactions.data.length) * 100).toFixed(1)}%)`);
  });
  // Pattern analysis: one summary line per generated pattern.
  console.log('\nEmergent Patterns:');
  emergentPatterns.data.forEach((pattern: any) => {
    console.log(`- ${pattern.pattern_type}: ${pattern.participants.length} participants, stability ${pattern.stability_score}/100`);
  });
  // Ruv-Swarm collective intelligence (illustrative command, not executed).
  console.log('\nRuv-Swarm Collective Intelligence:');
  console.log('npx ruv-swarm mcp start');
  console.log('// MCP: neural_patterns to analyze emergent behaviors');
  return { swarmStates, interactions, emergentPatterns, behaviorEvolution };
}
// ============================================================================
// Example 4: Voting and Consensus Data
// ============================================================================
/**
 * Generate voting and consensus mechanism data.
 *
 * Synthesizes 50 voting sessions (with per-vote records and aggregated
 * results), 100 consensus-mechanism traces (multi-round deliberation), and
 * 200 agent voting-behavior profiles. Prints acceptance rate, average
 * participation, and consensus-strength summaries.
 *
 * @returns `{ votingSessions, consensusMechanisms, votingBehavior }`.
 */
export async function votingAndConsensusData() {
  console.log('\n🗳 Example 4: Voting and Consensus Mechanisms\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate voting sessions
  const votingSessions = await synth.generateStructured({
    count: 50,
    schema: {
      session_id: 'UUID',
      voting_method: 'simple_majority | qualified_majority | unanimous | weighted | ranked_choice',
      topic: {
        id: 'UUID',
        title: 'decision topic',
        description: 'topic description',
        importance: 'critical | high | medium | low',
      },
      eligible_voters: ['array of 10-50 agent ids'],
      votes: [
        {
          voter_id: 'agent id from eligible_voters',
          vote_value: 'for | against | abstain',
          weight: 'number (1.0-5.0)',
          reasoning: 'brief explanation',
          confidence: 'number (0-100)',
          cast_at: 'ISO timestamp',
        },
      ],
      result: {
        decision: 'accepted | rejected | tie',
        votes_for: 'number',
        votes_against: 'number',
        votes_abstain: 'number',
        weighted_score: 'number',
        participation_rate: 'number (0-100)',
        consensus_level: 'number (0-100)',
      },
      duration_minutes: 'number (5-180)',
      started_at: 'ISO timestamp',
      completed_at: 'ISO timestamp',
    },
    constraints: [
      'Votes array should match eligible_voters count',
      'Critical topics should have higher participation',
      'Weighted votes should affect weighted_score',
    ],
  });
  // Generate consensus mechanisms (each trace references a voting session
  // by id; cross-referential integrity is requested, not enforced).
  const consensusMechanisms = await synth.generateStructured({
    count: 100,
    schema: {
      mechanism_id: 'UUID',
      mechanism_type: 'deliberative | aggregative | iterative | delegative',
      session_id: 'UUID (from votingSessions)',
      rounds: [
        {
          round_number: 'number (1-5)',
          proposals: ['array of 2-5 proposal descriptions'],
          discussions: 'number (10-100)',
          opinion_shifts: 'number (0-20)',
          convergence_score: 'number (0-100)',
          duration_minutes: 'number (5-60)',
        },
      ],
      final_consensus: 'strong | moderate | weak | none',
      compromises_made: 'number (0-5)',
      dissenting_opinions: ['array of 0-3 dissenting viewpoints or empty'],
    },
  });
  // Generate agent voting behavior
  const votingBehavior = await synth.generateStructured({
    count: 200,
    schema: {
      agent_id: 'agent-{1-50}',
      total_votes: 'number (0-50)',
      voting_pattern: 'consistent | moderate | swing',
      influence_level: 'high | medium | low',
      consensus_seeking: 'number (0-100)',
      independence_score: 'number (0-100)',
      expertise_alignment: 'number (0-100)',
    },
  });
  console.log('Voting and Consensus Analysis:');
  console.log(`- Voting sessions: ${votingSessions.data.length}`);
  console.log(`- Consensus mechanisms: ${consensusMechanisms.data.length}`);
  console.log(`- Agent behaviors: ${votingBehavior.data.length}`);
  // Analyze voting outcomes.
  // NOTE(review): divisions assume non-empty result sets for the requested
  // counts (50/100) — confirm the generator never returns an empty batch.
  const acceptedCount = votingSessions.data.filter(
    (s: any) => s.result.decision === 'accepted'
  ).length;
  const avgParticipation = votingSessions.data.reduce(
    (sum: number, s: any) => sum + s.result.participation_rate,
    0
  ) / votingSessions.data.length;
  console.log(`\nVoting Outcomes:`);
  console.log(`- Accepted: ${acceptedCount} (${((acceptedCount / votingSessions.data.length) * 100).toFixed(1)}%)`);
  console.log(`- Average participation: ${avgParticipation.toFixed(1)}%`);
  // Consensus strength
  const strongConsensus = consensusMechanisms.data.filter(
    (m: any) => m.final_consensus === 'strong'
  ).length;
  console.log(`\nConsensus Quality:`);
  console.log(`- Strong consensus: ${strongConsensus} (${((strongConsensus / consensusMechanisms.data.length) * 100).toFixed(1)}%)`);
  return { votingSessions, consensusMechanisms, votingBehavior };
}
// ============================================================================
// Example 5: Reputation Systems
// ============================================================================
/**
 * Generate reputation and trust system data.
 *
 * Synthesizes 100 agent reputation profiles, 500 reputation events
 * (Poisson-distributed), 300 pairwise trust relationships, and a 200-point
 * daily time series of aggregate reputation metrics. Prints reputation
 * distribution and trust-network summaries, plus integration hints.
 *
 * @returns `{ reputationProfiles, reputationEvents, trustRelationships,
 *          reputationChanges }`.
 */
export async function reputationSystems() {
  console.log('\n⭐ Example 5: Reputation and Trust Systems\n');
  const synth = createSynth({
    provider: 'gemini',
    apiKey: process.env.GEMINI_API_KEY || 'demo-key',
  });
  // Generate agent reputation profiles
  const reputationProfiles = await synth.generateStructured({
    count: 100,
    schema: {
      agent_id: 'agent-{1-100}',
      overall_reputation: 'number (0-100)',
      reputation_components: {
        reliability: 'number (0-100)',
        expertise: 'number (0-100)',
        collaboration: 'number (0-100)',
        responsiveness: 'number (0-100)',
        quality: 'number (0-100)',
      },
      trust_score: 'number (0-100)',
      endorsements: 'number (0-50)',
      negative_feedback: 'number (0-20)',
      tasks_completed: 'number (0-1000)',
      success_rate: 'number (0-100)',
      tenure_days: 'number (1-730)',
      reputation_trend: 'rising | stable | declining',
      badges: ['array of 0-5 achievement badges or empty'],
    },
    constraints: [
      'Overall reputation should correlate with component scores',
      'Higher success rate should correlate with higher reputation',
      'Trust score should factor in tenure and feedback',
    ],
  });
  // Generate reputation events (impact may be negative or positive).
  const reputationEvents = await synth.generateEvents({
    count: 500,
    eventTypes: [
      'endorsement_received',
      'feedback_positive',
      'feedback_negative',
      'task_success',
      'task_failure',
      'badge_earned',
      'collaboration_rated',
    ],
    schema: {
      event_id: 'UUID',
      event_type: 'one of eventTypes',
      subject_agent_id: 'agent-{1-100}',
      evaluator_agent_id: 'agent-{1-100} or null',
      impact: 'number (-10 to +10)',
      context: 'brief description',
      evidence: 'supporting evidence description or null',
      timestamp: 'ISO timestamp',
    },
    distribution: 'poisson',
  });
  // Generate trust relationships (directed: trustor -> trustee).
  const trustRelationships = await synth.generateStructured({
    count: 300,
    schema: {
      relationship_id: 'UUID',
      trustor_agent_id: 'agent-{1-100}',
      trustee_agent_id: 'agent-{1-100}',
      trust_level: 'number (0-100)',
      relationship_type: 'direct | transitive | institutional',
      interaction_count: 'number (1-100)',
      successful_interactions: 'number (proportional to interaction_count)',
      last_interaction: 'ISO timestamp',
      trust_evolution: 'building | established | declining',
    },
    constraints: [
      'trustor_agent_id should be different from trustee_agent_id',
      'Trust level should correlate with success rate',
      'More interactions should increase trust stability',
    ],
  });
  // Generate reputation decay and recovery
  const reputationChanges = await synth.generateTimeSeries({
    count: 200,
    interval: '1d',
    metrics: [
      'avg_reputation',
      'reputation_variance',
      'positive_events',
      'negative_events',
      'trust_network_density',
      'endorsement_rate',
    ],
    trend: 'stable',
  });
  console.log('Reputation System Analysis:');
  console.log(`- Agent profiles: ${reputationProfiles.data.length}`);
  console.log(`- Reputation events: ${reputationEvents.data.length}`);
  console.log(`- Trust relationships: ${trustRelationships.data.length}`);
  console.log(`- Time series points: ${reputationChanges.data.length}`);
  // Analyze reputation distribution: bucket into high (>=80) and low (<40).
  const highReputation = reputationProfiles.data.filter(
    (p: any) => p.overall_reputation >= 80
  ).length;
  const lowReputation = reputationProfiles.data.filter(
    (p: any) => p.overall_reputation < 40
  ).length;
  console.log(`\nReputation Distribution:`);
  console.log(`- High reputation (≥80): ${highReputation} (${((highReputation / reputationProfiles.data.length) * 100).toFixed(1)}%)`);
  console.log(`- Low reputation (<40): ${lowReputation} (${((lowReputation / reputationProfiles.data.length) * 100).toFixed(1)}%)`);
  // Trust network analysis.
  // NOTE(review): divides by trustRelationships.data.length — assumes a
  // non-empty batch for the requested count of 300; confirm.
  const avgTrustLevel = trustRelationships.data.reduce(
    (sum: number, r: any) => sum + r.trust_level,
    0
  ) / trustRelationships.data.length;
  console.log(`\nTrust Network:`);
  console.log(`- Average trust level: ${avgTrustLevel.toFixed(1)}/100`);
  console.log(`- Established relationships: ${trustRelationships.data.filter((r: any) => r.trust_evolution === 'established').length}`);
  // Integration with reputation tracking (illustrative, not executed).
  console.log('\nReputation Tracking Integration:');
  console.log('// Store reputation events in AgenticDB');
  console.log('// Use claude-flow hooks to update reputation after tasks');
  console.log('npx claude-flow@alpha hooks post-task --update-reputation true');
  return { reputationProfiles, reputationEvents, trustRelationships, reputationChanges };
}
// ============================================================================
// Run All Examples
// ============================================================================
/**
 * Run every collective-intelligence example sequentially, printing a
 * separator line after each one. Logs and rethrows the first failure.
 */
export async function runAllCollectiveIntelligenceExamples() {
  const separator = '='.repeat(70);
  // Table-driven: examples execute strictly in this order.
  const examples = [
    collaborativeProblemSolving,
    knowledgeSharingPatterns,
    emergentBehaviorSimulation,
    votingAndConsensusData,
    reputationSystems,
  ];
  console.log('🚀 Running All Collective Intelligence Examples\n');
  console.log(separator);
  try {
    for (const runExample of examples) {
      await runExample();
      console.log(separator);
    }
    console.log('\n✅ All collective intelligence examples completed!\n');
  } catch (error: any) {
    console.error('❌ Error running examples:', error.message);
    throw error;
  }
}
// Self-execution guard: run the full suite only when this module is invoked
// directly (e.g. `node collective-intelligence.js`), not when imported.
// NOTE(review): the `file://` + argv comparison can fail on Windows paths or
// through symlinks — confirm if cross-platform direct execution matters.
if (import.meta.url === `file://${process.argv[1]}`) {
  runAllCollectiveIntelligenceExamples().catch(console.error);
}

View File

@@ -0,0 +1,58 @@
/**
 * Distributed Processing Examples
 *
 * Demonstrates distributed computation patterns including map-reduce,
 * worker pools, message queues, event-driven architectures, and
 * saga pattern transactions for multi-agent systems.
 *
 * Integrates with:
 * - claude-flow: Distributed workflow orchestration
 * - Apache Kafka: Event streaming
 * - RabbitMQ: Message queuing
 * - Redis: Distributed caching
 */
// NOTE(review): this appears to be a tsc-generated declaration file for
// distributed-processing.ts (it carries a sourceMappingURL) — prefer
// regenerating it from the source over hand-editing.
/**
 * Generate map-reduce job execution data for distributed processing
 */
export declare function mapReduceJobData(): Promise<import("../../dist/index.js").GenerationResult<unknown>>;
/**
 * Generate worker pool execution data
 */
export declare function workerPoolSimulation(): Promise<{
    poolStates: import("../../dist/index.js").GenerationResult<unknown>;
    workerMetrics: import("../../dist/index.js").GenerationResult<unknown>;
    taskExecutions: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate message queue data (RabbitMQ, SQS, etc.)
 */
export declare function messageQueueScenarios(): Promise<{
    queueMetrics: import("../../dist/index.js").GenerationResult<unknown>;
    messages: import("../../dist/index.js").GenerationResult<unknown>;
    consumers: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate event-driven architecture data (Kafka, EventBridge)
 */
export declare function eventDrivenArchitecture(): Promise<{
    events: import("../../dist/index.js").GenerationResult<unknown>;
    handlers: import("../../dist/index.js").GenerationResult<unknown>;
    projections: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate saga pattern distributed transaction data
 */
export declare function sagaPatternTransactions(): Promise<{
    sagas: import("../../dist/index.js").GenerationResult<unknown>;
    sagaEvents: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Generate stream processing pipeline data (Kafka Streams, Flink)
 */
export declare function streamProcessingPipeline(): Promise<{
    pipeline: import("../../dist/index.js").GenerationResult<unknown>;
    metrics: import("../../dist/index.js").GenerationResult<unknown>;
    aggregations: import("../../dist/index.js").GenerationResult<unknown>;
}>;
/**
 * Run every distributed-processing example in sequence.
 */
export declare function runAllDistributedProcessingExamples(): Promise<void>;
//# sourceMappingURL=distributed-processing.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"distributed-processing.d.ts","sourceRoot":"","sources":["distributed-processing.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AASH;;GAEG;AACH,wBAAsB,gBAAgB,qEA2FrC;AAMD;;GAEG;AACH,wBAAsB,oBAAoB;;;;GAwFzC;AAMD;;GAEG;AACH,wBAAsB,qBAAqB;;;;GAoG1C;AAMD;;GAEG;AACH,wBAAsB,uBAAuB;;;;GAoH5C;AAMD;;GAEG;AACH,wBAAsB,uBAAuB;;;GA0G5C;AAMD;;GAEG;AACH,wBAAsB,wBAAwB;;;;GA0F7C;AAMD,wBAAsB,mCAAmC,kBA4BxD"}

View File

@@ -0,0 +1,621 @@
"use strict";
/**
* Distributed Processing Examples
*
* Demonstrates distributed computation patterns including map-reduce,
* worker pools, message queues, event-driven architectures, and
* saga pattern transactions for multi-agent systems.
*
* Integrates with:
* - claude-flow: Distributed workflow orchestration
* - Apache Kafka: Event streaming
* - RabbitMQ: Message queuing
* - Redis: Distributed caching
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.mapReduceJobData = mapReduceJobData;
exports.workerPoolSimulation = workerPoolSimulation;
exports.messageQueueScenarios = messageQueueScenarios;
exports.eventDrivenArchitecture = eventDrivenArchitecture;
exports.sagaPatternTransactions = sagaPatternTransactions;
exports.streamProcessingPipeline = streamProcessingPipeline;
exports.runAllDistributedProcessingExamples = runAllDistributedProcessingExamples;
const index_js_1 = require("../../dist/index.js");
// ============================================================================
// Example 1: Map-Reduce Job Data
// ============================================================================
/**
 * Generate map-reduce job execution data for distributed processing.
 *
 * Synthesizes 20 map-reduce jobs (map/shuffle/reduce phases with per-task
 * records), then prints completion, efficiency and parallelism metrics.
 *
 * Fix: the average-efficiency computation previously divided by
 * `completedJobs.length` unguarded; the "95% of tasks completed" constraint
 * is only a request to the generator, so zero completed jobs was possible
 * and produced NaN in the printed report. Now guarded (reports 0).
 *
 * @returns The generation result containing the synthesized jobs.
 */
async function mapReduceJobData() {
    console.log('\n🗺 Example 1: Map-Reduce Job Execution\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
        cacheStrategy: 'memory',
    });
    // Generate map-reduce jobs
    const jobs = await synth.generateStructured({
        count: 20,
        schema: {
            job_id: 'UUID',
            job_name: 'descriptive job name (e.g., "Word Count Analysis")',
            input_size_mb: 'number (100-10000)',
            map_phase: {
                mapper_count: 'number (10-100)',
                tasks: [
                    {
                        task_id: 'UUID',
                        mapper_id: 'mapper-{1-100}',
                        input_split: 'split-{id}',
                        input_size_mb: 'number (proportional to job input)',
                        output_records: 'number (1000-100000)',
                        execution_time_ms: 'number (1000-30000)',
                        status: 'completed | failed | running',
                    },
                ],
                start_time: 'ISO timestamp',
                end_time: 'ISO timestamp',
                duration_ms: 'number (sum of task times)',
            },
            shuffle_phase: {
                data_transferred_mb: 'number (input_size_mb * 0.8-1.2)',
                partitions: 'number (10-50)',
                transfer_time_ms: 'number (5000-60000)',
            },
            reduce_phase: {
                reducer_count: 'number (5-30)',
                tasks: [
                    {
                        task_id: 'UUID',
                        reducer_id: 'reducer-{1-30}',
                        partition_id: 'number',
                        input_records: 'number (10000-500000)',
                        output_records: 'number (100-10000)',
                        execution_time_ms: 'number (2000-40000)',
                        status: 'completed | failed | running',
                    },
                ],
                start_time: 'ISO timestamp',
                end_time: 'ISO timestamp',
                duration_ms: 'number',
            },
            overall_status: 'completed | failed | running',
            total_duration_ms: 'number',
            efficiency_score: 'number (0.5-1.0)',
        },
        constraints: [
            'Map tasks should run in parallel',
            'Reduce phase starts after map phase completes',
            '95% of tasks should be completed',
            'Efficiency should correlate with parallelism',
        ],
    });
    // Analyze map-reduce performance.
    // Guarded average: an empty completedJobs array previously yielded NaN.
    const completedJobs = jobs.data.filter((j) => j.overall_status === 'completed');
    const avgEfficiency = completedJobs.length > 0
        ? completedJobs.reduce((sum, j) => sum + j.efficiency_score, 0) / completedJobs.length
        : 0;
    console.log('Map-Reduce Analysis:');
    console.log(`- Total jobs: ${jobs.data.length}`);
    console.log(`- Completed: ${completedJobs.length}`);
    console.log(`- Average efficiency: ${(avgEfficiency * 100).toFixed(1)}%`);
    console.log(`- Total data processed: ${jobs.data.reduce((sum, j) => sum + j.input_size_mb, 0).toFixed(0)} MB`);
    // Calculate parallelism metrics.
    // NOTE(review): divides by jobs.data.length — assumes the generator never
    // returns an empty batch for count: 20; confirm.
    const avgMappers = jobs.data.reduce((sum, j) => sum + j.map_phase.mapper_count, 0) / jobs.data.length;
    const avgReducers = jobs.data.reduce((sum, j) => sum + j.reduce_phase.reducer_count, 0) / jobs.data.length;
    console.log(`\nParallelism Metrics:`);
    console.log(`- Average mappers: ${avgMappers.toFixed(1)}`);
    console.log(`- Average reducers: ${avgReducers.toFixed(1)}`);
    // Integration with claude-flow (illustrative command, not executed).
    console.log('\nClaude-Flow Integration:');
    console.log('npx claude-flow@alpha hooks pre-task --description "Map-Reduce job execution"');
    console.log('// Store results in coordination memory for analysis');
    return jobs;
}
// ============================================================================
// Example 2: Worker Pool Simulation
// ============================================================================
/**
 * Generate worker pool execution data.
 *
 * Synthesizes a 100-point time series of pool-level metrics, 200 per-worker
 * metric snapshots, and 500 task-lifecycle events (Poisson-distributed),
 * then prints utilization and worker-state summaries.
 *
 * @returns {{ poolStates: object, workerMetrics: object, taskExecutions: object }}
 *          The three generation results.
 */
async function workerPoolSimulation() {
    console.log('\n👷 Example 2: Worker Pool Simulation\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate worker pool state over time
    const poolStates = await synth.generateTimeSeries({
        count: 100,
        interval: '1m',
        metrics: [
            'active_workers',
            'idle_workers',
            'queue_size',
            'tasks_per_minute',
            'avg_task_duration_ms',
            'worker_utilization',
        ],
        trend: 'mixed',
        seasonality: true,
    });
    // Generate individual worker performance
    const workerMetrics = await synth.generateStructured({
        count: 200,
        schema: {
            timestamp: 'ISO timestamp',
            worker_id: 'worker-{1-20}',
            state: 'idle | busy | initializing | terminating',
            current_task_id: 'UUID or null',
            tasks_completed: 'number (0-1000)',
            tasks_failed: 'number (0-50)',
            avg_execution_time_ms: 'number (100-5000)',
            cpu_usage: 'number (0-100)',
            memory_mb: 'number (100-2000)',
            uptime_seconds: 'number (0-86400)',
            last_heartbeat: 'ISO timestamp',
        },
        constraints: [
            'Busy workers should have current_task_id',
            'Idle workers should have null current_task_id',
            'High task count should correlate with low avg execution time',
        ],
    });
    // Generate task execution history (lifecycle events per task).
    const taskExecutions = await synth.generateEvents({
        count: 500,
        eventTypes: ['task_queued', 'task_assigned', 'task_started', 'task_completed', 'task_failed'],
        schema: {
            event_id: 'UUID',
            task_id: 'UUID',
            event_type: 'one of eventTypes',
            worker_id: 'worker-{1-20} or null',
            queue_wait_time_ms: 'number (0-10000)',
            execution_time_ms: 'number (100-5000)',
            priority: 'number (1-10)',
            timestamp: 'ISO timestamp',
        },
        distribution: 'poisson',
    });
    console.log('Worker Pool Analysis:');
    console.log(`- Time series points: ${poolStates.data.length}`);
    console.log(`- Worker metrics: ${workerMetrics.data.length}`);
    console.log(`- Task executions: ${taskExecutions.data.length}`);
    // Calculate pool efficiency.
    // NOTE(review): divides by poolStates.data.length — assumes a non-empty
    // batch for the requested count of 100; confirm.
    const avgUtilization = poolStates.data.reduce((sum, p) => sum + p.worker_utilization, 0) / poolStates.data.length;
    console.log(`\nPool Efficiency:`);
    console.log(`- Average utilization: ${avgUtilization.toFixed(1)}%`);
    console.log(`- Active workers: ${workerMetrics.data.filter((w) => w.state === 'busy').length}`);
    console.log(`- Idle workers: ${workerMetrics.data.filter((w) => w.state === 'idle').length}`);
    // Integration patterns (illustrative snippets printed, not executed).
    console.log('\nIntegration Pattern:');
    console.log('// Bull Queue (Redis-based)');
    console.log('const queue = new Queue("tasks", { redis: redisConfig });');
    console.log('// Process with worker pool');
    console.log('queue.process(concurrency, async (job) => { /* ... */ });');
    return { poolStates, workerMetrics, taskExecutions };
}
// ============================================================================
// Example 3: Message Queue Scenarios
// ============================================================================
/**
 * Generate message queue data (RabbitMQ, SQS, etc.).
 *
 * Synthesizes a 150-point queue-metrics time series, 1000 individual
 * messages with lifecycle timestamps, and 15 consumer profiles, then prints
 * status counts and throughput metrics.
 *
 * Fix: the average-latency computation previously summed only the messages
 * whose `time_in_queue_ms` was truthy but divided by ALL messages,
 * deflating the average whenever any message lacked the field; the
 * truthiness filter also discarded legitimate 0ms latencies. It now
 * averages over the messages that actually carry a numeric latency.
 *
 * @returns {{ queueMetrics: object, messages: object, consumers: object }}
 *          The three generation results.
 */
async function messageQueueScenarios() {
    console.log('\n📬 Example 3: Message Queue Scenarios\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Generate queue metrics
    const queueMetrics = await synth.generateTimeSeries({
        count: 150,
        interval: '30s',
        metrics: [
            'messages_published',
            'messages_consumed',
            'queue_depth',
            'consumer_count',
            'publish_rate',
            'consume_rate',
            'avg_latency_ms',
        ],
        trend: 'up',
        seasonality: false,
    });
    // Generate message data
    const messages = await synth.generateStructured({
        count: 1000,
        schema: {
            message_id: 'UUID',
            queue_name: 'tasks | events | notifications | dead_letter',
            priority: 'number (0-9)',
            payload_size_bytes: 'number (100-10000)',
            content_type: 'application/json | text/plain | application/octet-stream',
            routing_key: 'routing key pattern',
            headers: {
                correlation_id: 'UUID',
                reply_to: 'queue name or null',
                timestamp: 'ISO timestamp',
                retry_count: 'number (0-5)',
            },
            published_at: 'ISO timestamp',
            consumed_at: 'ISO timestamp or null',
            acknowledged_at: 'ISO timestamp or null',
            status: 'pending | consumed | acknowledged | dead_letter | expired',
            time_in_queue_ms: 'number (0-60000)',
            consumer_id: 'consumer-{1-15} or null',
        },
        constraints: [
            'Consumed messages should have consumed_at timestamp',
            'Acknowledged messages should have acknowledged_at after consumed_at',
            '5% of messages should be in dead_letter queue',
            'Higher priority messages should have lower time_in_queue_ms',
        ],
    });
    // Generate consumer performance
    const consumers = await synth.generateStructured({
        count: 15,
        schema: {
            consumer_id: 'consumer-{1-15}',
            queue_subscriptions: ['array of 1-3 queue names'],
            messages_processed: 'number (0-200)',
            processing_rate_per_second: 'number (1-50)',
            error_count: 'number (0-20)',
            avg_processing_time_ms: 'number (100-2000)',
            prefetch_count: 'number (1-100)',
            status: 'active | idle | error | disconnected',
            last_activity: 'ISO timestamp',
        },
    });
    console.log('Message Queue Analysis:');
    console.log(`- Total messages: ${messages.data.length}`);
    console.log(`- Pending: ${messages.data.filter((m) => m.status === 'pending').length}`);
    console.log(`- Consumed: ${messages.data.filter((m) => m.status === 'consumed').length}`);
    console.log(`- Dead letter: ${messages.data.filter((m) => m.status === 'dead_letter').length}`);
    // Calculate throughput
    const totalProcessed = consumers.data.reduce((sum, c) => sum + c.messages_processed, 0);
    // Average over messages that actually carry a numeric latency — dividing
    // by messages.data.length (as before) skewed the result, and a
    // truthiness filter dropped valid 0ms values.
    const latencies = messages.data
        .map((m) => m.time_in_queue_ms)
        .filter((v) => typeof v === 'number');
    const avgLatency = latencies.length > 0
        ? latencies.reduce((sum, v) => sum + v, 0) / latencies.length
        : 0;
    console.log(`\nThroughput Metrics:`);
    console.log(`- Total processed: ${totalProcessed}`);
    console.log(`- Active consumers: ${consumers.data.filter((c) => c.status === 'active').length}`);
    console.log(`- Average latency: ${avgLatency.toFixed(0)}ms`);
    // RabbitMQ integration example (illustrative snippets, not executed).
    console.log('\nRabbitMQ Integration:');
    console.log('// Connect to RabbitMQ');
    console.log('const channel = await connection.createChannel();');
    console.log('await channel.assertQueue("tasks", { durable: true });');
    console.log('// Publish with agentic-synth data');
    return { queueMetrics, messages, consumers };
}
// ============================================================================
// Example 4: Event-Driven Architecture
// ============================================================================
/**
* Generate event-driven architecture data (Kafka, EventBridge)
*/
async function eventDrivenArchitecture() {
    console.log('\n⚡ Example 4: Event-Driven Architecture\n');
    // Synthetic-data client; falls back to a demo key when none is configured.
    const generator = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // One hour of Poisson-distributed domain events.
    const eventStream = await generator.generateEvents({
        count: 2000,
        eventTypes: [
            'user.created',
            'user.updated',
            'order.placed',
            'order.confirmed',
            'order.shipped',
            'payment.processed',
            'inventory.updated',
            'notification.sent',
        ],
        schema: {
            event_id: 'UUID',
            event_type: 'one of eventTypes',
            event_version: 'v1 | v2',
            aggregate_id: 'UUID',
            aggregate_type: 'user | order | payment | inventory',
            payload: 'JSON object with event data',
            metadata: {
                causation_id: 'UUID (triggering event)',
                correlation_id: 'UUID (workflow id)',
                timestamp: 'ISO timestamp',
                source_service: 'service name',
                user_id: 'UUID or null',
            },
            partition_key: 'aggregate_id',
            sequence_number: 'number',
            published_at: 'ISO timestamp',
        },
        distribution: 'poisson',
        timeRange: {
            start: new Date(Date.now() - 3600000),
            end: new Date(),
        },
    });
    // Subscribers that react to the event stream.
    const eventHandlers = await generator.generateStructured({
        count: 30,
        schema: {
            handler_id: 'UUID',
            handler_name: 'descriptive name',
            subscribed_events: ['array of 1-5 event types'],
            service_name: 'service name',
            processing_mode: 'sync | async | batch',
            events_processed: 'number (0-500)',
            events_failed: 'number (0-50)',
            avg_processing_time_ms: 'number (50-2000)',
            retry_policy: {
                max_retries: 'number (3-10)',
                backoff_strategy: 'exponential | linear | constant',
                dead_letter_queue: 'boolean',
            },
            status: 'active | degraded | failing | disabled',
        },
    });
    // Read models projected from the event stream.
    const readModels = await generator.generateStructured({
        count: 100,
        schema: {
            projection_id: 'UUID',
            projection_type: 'user_profile | order_summary | inventory_view',
            aggregate_id: 'UUID',
            version: 'number (1-100)',
            data: 'JSON object representing current state',
            last_event_id: 'UUID (from events)',
            last_updated: 'ISO timestamp',
            consistency_lag_ms: 'number (0-5000)',
        },
    });
    console.log('Event-Driven Architecture Analysis:');
    console.log(`- Total events: ${eventStream.data.length}`);
    console.log(`- Event handlers: ${eventHandlers.data.length}`);
    console.log(`- Projections: ${readModels.data.length}`);
    // Tally events per type; object string keys keep first-seen order,
    // so the report lists types in the same order they were encountered.
    const typeCounts = {};
    for (const evt of eventStream.data) {
        typeCounts[evt.event_type] = (typeCounts[evt.event_type] || 0) + 1;
    }
    console.log('\nEvent Distribution:');
    for (const [type, count] of Object.entries(typeCounts)) {
        console.log(`- ${type}: ${count}`);
    }
    // Mean projection lag behind the event stream, in milliseconds.
    let lagTotal = 0;
    for (const model of readModels.data) {
        lagTotal += model.consistency_lag_ms;
    }
    const avgLag = lagTotal / readModels.data.length;
    const activeHandlers = eventHandlers.data.filter((h) => h.status === 'active').length;
    console.log(`\nConsistency Metrics:`);
    console.log(`- Average lag: ${avgLag.toFixed(0)}ms`);
    console.log(`- Active handlers: ${activeHandlers}`);
    // Kafka integration
    console.log('\nKafka Integration:');
    console.log('// Produce events');
    console.log('await producer.send({ topic: "events", messages: [...] });');
    console.log('// Consume with handler coordination');
    console.log('await consumer.run({ eachMessage: async ({ message }) => { /* ... */ } });');
    return { events: eventStream, handlers: eventHandlers, projections: readModels };
}
// ============================================================================
// Example 5: Saga Pattern Transactions
// ============================================================================
/**
* Generate saga pattern distributed transaction data
*/
async function sagaPatternTransactions() {
    console.log('\n🔄 Example 5: Saga Pattern Transactions\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Saga executions with per-step status and compensation metadata.
    const sagas = await synth.generateStructured({
        count: 100,
        schema: {
            saga_id: 'UUID',
            saga_type: 'order_fulfillment | payment_processing | user_registration | booking_reservation',
            orchestration_type: 'orchestration | choreography',
            initiator_service: 'service name',
            steps: [
                {
                    step_id: 'UUID',
                    step_name: 'descriptive step name',
                    service: 'service name',
                    operation: 'operation name',
                    compensation_operation: 'compensation operation name',
                    status: 'pending | executing | completed | failed | compensating | compensated',
                    started_at: 'ISO timestamp or null',
                    completed_at: 'ISO timestamp or null',
                    duration_ms: 'number (100-5000)',
                    retry_count: 'number (0-3)',
                },
            ],
            overall_status: 'in_progress | completed | failed | compensated',
            started_at: 'ISO timestamp',
            completed_at: 'ISO timestamp or null',
            total_duration_ms: 'number',
            compensation_triggered: 'boolean',
            compensation_reason: 'error description or null',
        },
        constraints: [
            'Sagas should have 3-8 steps',
            'Failed sagas should have compensation_triggered = true',
            'Compensated steps should execute in reverse order',
            '80% of sagas should complete successfully',
        ],
    });
    // The event stream emitted while those sagas ran.
    const sagaEvents = await synth.generateEvents({
        count: 500,
        eventTypes: [
            'saga_started',
            'step_started',
            'step_completed',
            'step_failed',
            'compensation_started',
            'compensation_completed',
            'saga_completed',
            'saga_failed',
        ],
        schema: {
            event_id: 'UUID',
            saga_id: 'UUID (from sagas)',
            step_id: 'UUID or null',
            event_type: 'one of eventTypes',
            service: 'service name',
            payload: 'JSON object',
            timestamp: 'ISO timestamp',
        },
    });
    // Split sagas by terminal status for the summary report.
    const successfulSagas = sagas.data.filter((s) => s.overall_status === 'completed');
    const failedSagas = sagas.data.filter((s) => s.overall_status === 'failed');
    const compensatedSagas = sagas.data.filter((s) => s.overall_status === 'compensated');
    console.log('Saga Pattern Analysis:');
    console.log(`- Total sagas: ${sagas.data.length}`);
    console.log(`- Successful: ${successfulSagas.length} (${((successfulSagas.length / sagas.data.length) * 100).toFixed(1)}%)`);
    console.log(`- Failed: ${failedSagas.length}`);
    console.log(`- Compensated: ${compensatedSagas.length}`);
    // Average step count across all sagas.
    const avgSteps = sagas.data.reduce((sum, s) => sum + s.steps.length, 0) / sagas.data.length;
    // Bug fix: the previous code summed durations of every saga carrying a
    // completed_at timestamp (including failed/compensated ones) but divided
    // by the successful-saga count, skewing the average. Sum and denominator
    // now use the same finished-saga sample, with a zero guard so an empty
    // sample prints 0 instead of NaN/Infinity.
    const finishedSagas = sagas.data.filter((s) => s.completed_at);
    const avgDuration = finishedSagas.length > 0
        ? finishedSagas.reduce((sum, s) => sum + s.total_duration_ms, 0) / finishedSagas.length
        : 0;
    console.log(`\nTransaction Metrics:`);
    console.log(`- Average steps per saga: ${avgSteps.toFixed(1)}`);
    console.log(`- Average duration: ${avgDuration.toFixed(0)}ms`);
    console.log(`- Compensation rate: ${((compensatedSagas.length / sagas.data.length) * 100).toFixed(1)}%`);
    // Orchestration vs Choreography
    const orchestrated = sagas.data.filter((s) => s.orchestration_type === 'orchestration').length;
    const choreographed = sagas.data.filter((s) => s.orchestration_type === 'choreography').length;
    console.log(`\nOrchestration Style:`);
    console.log(`- Orchestration: ${orchestrated}`);
    console.log(`- Choreography: ${choreographed}`);
    // Integration with coordination frameworks
    console.log('\nIntegration Pattern:');
    console.log('// MassTransit Saga State Machine');
    console.log('class OrderSaga : MassTransitStateMachine<OrderState> { /* ... */ }');
    console.log('// Or custom orchestrator with agentic-synth test data');
    return { sagas, sagaEvents };
}
// ============================================================================
// Example 6: Stream Processing Pipeline
// ============================================================================
/**
* Generate stream processing pipeline data (Kafka Streams, Flink)
*/
async function streamProcessingPipeline() {
    console.log('\n🌊 Example 6: Stream Processing Pipeline\n');
    const synth = (0, index_js_1.createSynth)({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // A single pipeline topology document describing every stage.
    const pipeline = await synth.generateStructured({
        count: 1,
        schema: {
            pipeline_id: 'UUID',
            pipeline_name: 'descriptive name',
            stages: [
                {
                    stage_id: 'UUID',
                    stage_type: 'source | transform | aggregate | filter | sink',
                    stage_name: 'stage name',
                    parallelism: 'number (1-20)',
                    input_topics: ['array of topic names'],
                    output_topics: ['array of topic names'],
                    transformation: 'description of transformation',
                    windowing: {
                        type: 'tumbling | sliding | session | global',
                        size_ms: 'number (1000-300000)',
                        slide_ms: 'number or null',
                    },
                    state_store: 'store name or null',
                },
            ],
        },
    });
    // Operational metrics sampled every 30 seconds.
    const metrics = await synth.generateTimeSeries({
        count: 200,
        interval: '30s',
        metrics: [
            'records_in',
            'records_out',
            'records_filtered',
            'processing_latency_ms',
            'watermark_lag_ms',
            'cpu_usage',
            'memory_mb',
        ],
        trend: 'mixed',
    });
    // Emitted window aggregates, including late-arrival counts.
    const aggregations = await synth.generateStructured({
        count: 150,
        schema: {
            window_id: 'UUID',
            stage_id: 'UUID (from pipeline)',
            window_start: 'ISO timestamp',
            window_end: 'ISO timestamp',
            record_count: 'number (100-10000)',
            aggregate_values: {
                sum: 'number',
                avg: 'number',
                min: 'number',
                max: 'number',
                count: 'number',
            },
            emitted_at: 'ISO timestamp',
            late_arrivals: 'number (0-100)',
        },
    });
    console.log('Stream Processing Analysis:');
    // Robustness: optional chaining avoids a TypeError if the provider
    // returned no pipeline document.
    console.log(`- Pipeline stages: ${pipeline.data[0]?.stages?.length ?? 0}`);
    console.log(`- Metric points: ${metrics.data.length}`);
    console.log(`- Window aggregations: ${aggregations.data.length}`);
    // Average input/output records per sampling interval.
    const avgRecordsIn = metrics.data.reduce((sum, m) => sum + m.records_in, 0) / metrics.data.length;
    const avgRecordsOut = metrics.data.reduce((sum, m) => sum + m.records_out, 0) / metrics.data.length;
    // Guard the division: a zero input rate would otherwise print "NaN%".
    const filterRate = avgRecordsIn > 0
        ? ((avgRecordsIn - avgRecordsOut) / avgRecordsIn) * 100
        : 0;
    console.log(`\nThroughput:`);
    console.log(`- Input: ${avgRecordsIn.toFixed(0)} records/interval`);
    console.log(`- Output: ${avgRecordsOut.toFixed(0)} records/interval`);
    console.log(`- Filter rate: ${filterRate.toFixed(1)}%`);
    console.log('\nApache Flink Integration:');
    console.log('// Define stream processing job');
    console.log('env.addSource(kafkaSource).keyBy(...).window(...).aggregate(...).addSink(kafkaSink);');
    return { pipeline, metrics, aggregations };
}
// ============================================================================
// Run All Examples
// ============================================================================
async function runAllDistributedProcessingExamples() {
    console.log('🚀 Running All Distributed Processing Examples\n');
    // Visual divider printed between every example's output.
    const divider = '='.repeat(70);
    console.log(divider);
    // The six examples, executed sequentially in this fixed order.
    const examples = [
        mapReduceJobData,
        workerPoolSimulation,
        messageQueueScenarios,
        eventDrivenArchitecture,
        sagaPatternTransactions,
        streamProcessingPipeline,
    ];
    try {
        for (const example of examples) {
            await example();
            console.log(divider);
        }
        console.log('\n✅ All distributed processing examples completed!\n');
    }
    catch (error) {
        // Report which run failed, then propagate so callers can react.
        console.error('❌ Error running examples:', error.message);
        throw error;
    }
}
// When this module is invoked directly (node <file>), run the full example
// suite and surface any failure on stderr without crashing the process.
const invokedDirectly = import.meta.url === `file://${process.argv[1]}`;
if (invokedDirectly) {
    runAllDistributedProcessingExamples().catch(console.error);
}
//# sourceMappingURL=distributed-processing.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,700 @@
/**
* Distributed Processing Examples
*
* Demonstrates distributed computation patterns including map-reduce,
* worker pools, message queues, event-driven architectures, and
* saga pattern transactions for multi-agent systems.
*
* Integrates with:
* - claude-flow: Distributed workflow orchestration
* - Apache Kafka: Event streaming
* - RabbitMQ: Message queuing
* - Redis: Distributed caching
*/
import { AgenticSynth, createSynth } from '../../dist/index.js';
import type { GenerationResult } from '../../src/types.js';
// ============================================================================
// Example 1: Map-Reduce Job Data
// ============================================================================
/**
* Generate map-reduce job execution data for distributed processing
*/
export async function mapReduceJobData() {
    console.log('\n🗺 Example 1: Map-Reduce Job Execution\n');
    const synth = createSynth({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
        cacheStrategy: 'memory',
    });
    // Map-reduce jobs with map/shuffle/reduce phases and per-task detail.
    const jobs = await synth.generateStructured({
        count: 20,
        schema: {
            job_id: 'UUID',
            job_name: 'descriptive job name (e.g., "Word Count Analysis")',
            input_size_mb: 'number (100-10000)',
            map_phase: {
                mapper_count: 'number (10-100)',
                tasks: [
                    {
                        task_id: 'UUID',
                        mapper_id: 'mapper-{1-100}',
                        input_split: 'split-{id}',
                        input_size_mb: 'number (proportional to job input)',
                        output_records: 'number (1000-100000)',
                        execution_time_ms: 'number (1000-30000)',
                        status: 'completed | failed | running',
                    },
                ],
                start_time: 'ISO timestamp',
                end_time: 'ISO timestamp',
                duration_ms: 'number (sum of task times)',
            },
            shuffle_phase: {
                data_transferred_mb: 'number (input_size_mb * 0.8-1.2)',
                partitions: 'number (10-50)',
                transfer_time_ms: 'number (5000-60000)',
            },
            reduce_phase: {
                reducer_count: 'number (5-30)',
                tasks: [
                    {
                        task_id: 'UUID',
                        reducer_id: 'reducer-{1-30}',
                        partition_id: 'number',
                        input_records: 'number (10000-500000)',
                        output_records: 'number (100-10000)',
                        execution_time_ms: 'number (2000-40000)',
                        status: 'completed | failed | running',
                    },
                ],
                start_time: 'ISO timestamp',
                end_time: 'ISO timestamp',
                duration_ms: 'number',
            },
            overall_status: 'completed | failed | running',
            total_duration_ms: 'number',
            efficiency_score: 'number (0.5-1.0)',
        },
        constraints: [
            'Map tasks should run in parallel',
            'Reduce phase starts after map phase completes',
            '95% of tasks should be completed',
            'Efficiency should correlate with parallelism',
        ],
    });
    // Summarize completion and efficiency across the generated jobs.
    const completedJobs = jobs.data.filter((j: any) => j.overall_status === 'completed');
    // Bug fix: guard the division — with no completed jobs the old code
    // divided by zero and printed "NaN%".
    const avgEfficiency = completedJobs.length > 0
        ? completedJobs.reduce((sum: number, j: any) => sum + j.efficiency_score, 0) / completedJobs.length
        : 0;
    console.log('Map-Reduce Analysis:');
    console.log(`- Total jobs: ${jobs.data.length}`);
    console.log(`- Completed: ${completedJobs.length}`);
    console.log(`- Average efficiency: ${(avgEfficiency * 100).toFixed(1)}%`);
    console.log(`- Total data processed: ${jobs.data.reduce((sum: number, j: any) => sum + j.input_size_mb, 0).toFixed(0)} MB`);
    // Average degree of parallelism per phase across all jobs.
    const avgMappers = jobs.data.reduce((sum: number, j: any) => sum + j.map_phase.mapper_count, 0) / jobs.data.length;
    const avgReducers = jobs.data.reduce((sum: number, j: any) => sum + j.reduce_phase.reducer_count, 0) / jobs.data.length;
    console.log(`\nParallelism Metrics:`);
    console.log(`- Average mappers: ${avgMappers.toFixed(1)}`);
    console.log(`- Average reducers: ${avgReducers.toFixed(1)}`);
    // Integration with claude-flow
    console.log('\nClaude-Flow Integration:');
    console.log('npx claude-flow@alpha hooks pre-task --description "Map-Reduce job execution"');
    console.log('// Store results in coordination memory for analysis');
    return jobs;
}
// ============================================================================
// Example 2: Worker Pool Simulation
// ============================================================================
/**
* Generate worker pool execution data
*/
export async function workerPoolSimulation() {
    console.log('\n👷 Example 2: Worker Pool Simulation\n');
    // Synthetic-data client; the demo key keeps the example runnable offline.
    const dataGen = createSynth({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Pool-wide state sampled once per minute.
    const poolTimeline = await dataGen.generateTimeSeries({
        count: 100,
        interval: '1m',
        metrics: [
            'active_workers',
            'idle_workers',
            'queue_size',
            'tasks_per_minute',
            'avg_task_duration_ms',
            'worker_utilization',
        ],
        trend: 'mixed',
        seasonality: true,
    });
    // Snapshots of individual worker processes.
    const workerSnapshots = await dataGen.generateStructured({
        count: 200,
        schema: {
            timestamp: 'ISO timestamp',
            worker_id: 'worker-{1-20}',
            state: 'idle | busy | initializing | terminating',
            current_task_id: 'UUID or null',
            tasks_completed: 'number (0-1000)',
            tasks_failed: 'number (0-50)',
            avg_execution_time_ms: 'number (100-5000)',
            cpu_usage: 'number (0-100)',
            memory_mb: 'number (100-2000)',
            uptime_seconds: 'number (0-86400)',
            last_heartbeat: 'ISO timestamp',
        },
        constraints: [
            'Busy workers should have current_task_id',
            'Idle workers should have null current_task_id',
            'High task count should correlate with low avg execution time',
        ],
    });
    // Lifecycle events for tasks flowing through the pool.
    const taskEvents = await dataGen.generateEvents({
        count: 500,
        eventTypes: ['task_queued', 'task_assigned', 'task_started', 'task_completed', 'task_failed'],
        schema: {
            event_id: 'UUID',
            task_id: 'UUID',
            event_type: 'one of eventTypes',
            worker_id: 'worker-{1-20} or null',
            queue_wait_time_ms: 'number (0-10000)',
            execution_time_ms: 'number (100-5000)',
            priority: 'number (1-10)',
            timestamp: 'ISO timestamp',
        },
        distribution: 'poisson',
    });
    console.log('Worker Pool Analysis:');
    console.log(`- Time series points: ${poolTimeline.data.length}`);
    console.log(`- Worker metrics: ${workerSnapshots.data.length}`);
    console.log(`- Task executions: ${taskEvents.data.length}`);
    // Mean utilization across all sampled pool states.
    const utilizationTotal = poolTimeline.data
        .map((p: any) => p.worker_utilization)
        .reduce((a: number, b: number) => a + b, 0);
    const avgUtilization = utilizationTotal / poolTimeline.data.length;
    const busyCount = workerSnapshots.data.filter((w: any) => w.state === 'busy').length;
    const idleCount = workerSnapshots.data.filter((w: any) => w.state === 'idle').length;
    console.log(`\nPool Efficiency:`);
    console.log(`- Average utilization: ${avgUtilization.toFixed(1)}%`);
    console.log(`- Active workers: ${busyCount}`);
    console.log(`- Idle workers: ${idleCount}`);
    // Integration patterns
    console.log('\nIntegration Pattern:');
    console.log('// Bull Queue (Redis-based)');
    console.log('const queue = new Queue("tasks", { redis: redisConfig });');
    console.log('// Process with worker pool');
    console.log('queue.process(concurrency, async (job) => { /* ... */ });');
    return { poolStates: poolTimeline, workerMetrics: workerSnapshots, taskExecutions: taskEvents };
}
// ============================================================================
// Example 3: Message Queue Scenarios
// ============================================================================
/**
* Generate message queue data (RabbitMQ, SQS, etc.)
*/
export async function messageQueueScenarios() {
    console.log('\n📬 Example 3: Message Queue Scenarios\n');
    const synth = createSynth({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Queue-level health metrics sampled every 30 seconds with an upward trend.
    const queueMetrics = await synth.generateTimeSeries({
        count: 150,
        interval: '30s',
        metrics: [
            'messages_published',
            'messages_consumed',
            'queue_depth',
            'consumer_count',
            'publish_rate',
            'consume_rate',
            'avg_latency_ms',
        ],
        trend: 'up',
        seasonality: false,
    });
    // Individual messages across four queues, with full lifecycle timestamps.
    const messages = await synth.generateStructured({
        count: 1000,
        schema: {
            message_id: 'UUID',
            queue_name: 'tasks | events | notifications | dead_letter',
            priority: 'number (0-9)',
            payload_size_bytes: 'number (100-10000)',
            content_type: 'application/json | text/plain | application/octet-stream',
            routing_key: 'routing key pattern',
            headers: {
                correlation_id: 'UUID',
                reply_to: 'queue name or null',
                timestamp: 'ISO timestamp',
                retry_count: 'number (0-5)',
            },
            published_at: 'ISO timestamp',
            consumed_at: 'ISO timestamp or null',
            acknowledged_at: 'ISO timestamp or null',
            status: 'pending | consumed | acknowledged | dead_letter | expired',
            time_in_queue_ms: 'number (0-60000)',
            consumer_id: 'consumer-{1-15} or null',
        },
        constraints: [
            'Consumed messages should have consumed_at timestamp',
            'Acknowledged messages should have acknowledged_at after consumed_at',
            '5% of messages should be in dead_letter queue',
            'Higher priority messages should have lower time_in_queue_ms',
        ],
    });
    // Per-consumer performance snapshots.
    const consumers = await synth.generateStructured({
        count: 15,
        schema: {
            consumer_id: 'consumer-{1-15}',
            queue_subscriptions: ['array of 1-3 queue names'],
            messages_processed: 'number (0-200)',
            processing_rate_per_second: 'number (1-50)',
            error_count: 'number (0-20)',
            avg_processing_time_ms: 'number (100-2000)',
            prefetch_count: 'number (1-100)',
            status: 'active | idle | error | disconnected',
            last_activity: 'ISO timestamp',
        },
    });
    console.log('Message Queue Analysis:');
    console.log(`- Total messages: ${messages.data.length}`);
    console.log(`- Pending: ${messages.data.filter((m: any) => m.status === 'pending').length}`);
    console.log(`- Consumed: ${messages.data.filter((m: any) => m.status === 'consumed').length}`);
    console.log(`- Dead letter: ${messages.data.filter((m: any) => m.status === 'dead_letter').length}`);
    // Total throughput across all consumers.
    const totalProcessed = consumers.data.reduce(
        (sum: number, c: any) => sum + c.messages_processed,
        0
    );
    // Average queue latency over the messages that actually report one.
    // Bug fix: the previous code summed over the filtered sample but divided
    // by messages.data.length, and the truthiness filter also discarded
    // legitimate 0ms latencies. Sample and denominator now match, with a
    // zero guard so an empty sample prints 0 instead of NaN.
    const latencySamples = messages.data.filter((m: any) => m.time_in_queue_ms != null);
    const avgLatency = latencySamples.length > 0
        ? latencySamples.reduce((sum: number, m: any) => sum + m.time_in_queue_ms, 0) / latencySamples.length
        : 0;
    console.log(`\nThroughput Metrics:`);
    console.log(`- Total processed: ${totalProcessed}`);
    console.log(`- Active consumers: ${consumers.data.filter((c: any) => c.status === 'active').length}`);
    console.log(`- Average latency: ${avgLatency.toFixed(0)}ms`);
    // RabbitMQ integration example
    console.log('\nRabbitMQ Integration:');
    console.log('// Connect to RabbitMQ');
    console.log('const channel = await connection.createChannel();');
    console.log('await channel.assertQueue("tasks", { durable: true });');
    console.log('// Publish with agentic-synth data');
    return { queueMetrics, messages, consumers };
}
// ============================================================================
// Example 4: Event-Driven Architecture
// ============================================================================
/**
* Generate event-driven architecture data (Kafka, EventBridge)
*/
export async function eventDrivenArchitecture() {
    console.log('\n⚡ Example 4: Event-Driven Architecture\n');
    // Synthetic-data client; falls back to a demo key when none is configured.
    const synthClient = createSynth({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // One hour of Poisson-distributed domain events.
    const emittedEvents = await synthClient.generateEvents({
        count: 2000,
        eventTypes: [
            'user.created',
            'user.updated',
            'order.placed',
            'order.confirmed',
            'order.shipped',
            'payment.processed',
            'inventory.updated',
            'notification.sent',
        ],
        schema: {
            event_id: 'UUID',
            event_type: 'one of eventTypes',
            event_version: 'v1 | v2',
            aggregate_id: 'UUID',
            aggregate_type: 'user | order | payment | inventory',
            payload: 'JSON object with event data',
            metadata: {
                causation_id: 'UUID (triggering event)',
                correlation_id: 'UUID (workflow id)',
                timestamp: 'ISO timestamp',
                source_service: 'service name',
                user_id: 'UUID or null',
            },
            partition_key: 'aggregate_id',
            sequence_number: 'number',
            published_at: 'ISO timestamp',
        },
        distribution: 'poisson',
        timeRange: {
            start: new Date(Date.now() - 3600000),
            end: new Date(),
        },
    });
    // Subscribers that react to the event stream.
    const subscribers = await synthClient.generateStructured({
        count: 30,
        schema: {
            handler_id: 'UUID',
            handler_name: 'descriptive name',
            subscribed_events: ['array of 1-5 event types'],
            service_name: 'service name',
            processing_mode: 'sync | async | batch',
            events_processed: 'number (0-500)',
            events_failed: 'number (0-50)',
            avg_processing_time_ms: 'number (50-2000)',
            retry_policy: {
                max_retries: 'number (3-10)',
                backoff_strategy: 'exponential | linear | constant',
                dead_letter_queue: 'boolean',
            },
            status: 'active | degraded | failing | disabled',
        },
    });
    // Read models projected from the event stream.
    const readModels = await synthClient.generateStructured({
        count: 100,
        schema: {
            projection_id: 'UUID',
            projection_type: 'user_profile | order_summary | inventory_view',
            aggregate_id: 'UUID',
            version: 'number (1-100)',
            data: 'JSON object representing current state',
            last_event_id: 'UUID (from events)',
            last_updated: 'ISO timestamp',
            consistency_lag_ms: 'number (0-5000)',
        },
    });
    console.log('Event-Driven Architecture Analysis:');
    console.log(`- Total events: ${emittedEvents.data.length}`);
    console.log(`- Event handlers: ${subscribers.data.length}`);
    console.log(`- Projections: ${readModels.data.length}`);
    // Tally events per type; Map preserves insertion order, so the report
    // lists types in the order they were first encountered.
    const typeTally = emittedEvents.data.reduce(
        (tally: Map<string, number>, evt: any) => {
            tally.set(evt.event_type, (tally.get(evt.event_type) ?? 0) + 1);
            return tally;
        },
        new Map<string, number>()
    );
    console.log('\nEvent Distribution:');
    for (const [eventType, tally] of typeTally) {
        console.log(`- ${eventType}: ${tally}`);
    }
    // Mean projection lag behind the event stream, in milliseconds.
    const lagValues = readModels.data.map((p: any) => p.consistency_lag_ms);
    const avgLag = lagValues.reduce((a: number, b: number) => a + b, 0) / lagValues.length;
    const activeSubscribers = subscribers.data.filter((h: any) => h.status === 'active').length;
    console.log(`\nConsistency Metrics:`);
    console.log(`- Average lag: ${avgLag.toFixed(0)}ms`);
    console.log(`- Active handlers: ${activeSubscribers}`);
    // Kafka integration
    console.log('\nKafka Integration:');
    console.log('// Produce events');
    console.log('await producer.send({ topic: "events", messages: [...] });');
    console.log('// Consume with handler coordination');
    console.log('await consumer.run({ eachMessage: async ({ message }) => { /* ... */ } });');
    return { events: emittedEvents, handlers: subscribers, projections: readModels };
}
// ============================================================================
// Example 5: Saga Pattern Transactions
// ============================================================================
/**
* Generate saga pattern distributed transaction data
*/
export async function sagaPatternTransactions() {
    console.log('\n🔄 Example 5: Saga Pattern Transactions\n');
    const synth = createSynth({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // Saga executions with per-step status and compensation metadata.
    const sagas = await synth.generateStructured({
        count: 100,
        schema: {
            saga_id: 'UUID',
            saga_type: 'order_fulfillment | payment_processing | user_registration | booking_reservation',
            orchestration_type: 'orchestration | choreography',
            initiator_service: 'service name',
            steps: [
                {
                    step_id: 'UUID',
                    step_name: 'descriptive step name',
                    service: 'service name',
                    operation: 'operation name',
                    compensation_operation: 'compensation operation name',
                    status: 'pending | executing | completed | failed | compensating | compensated',
                    started_at: 'ISO timestamp or null',
                    completed_at: 'ISO timestamp or null',
                    duration_ms: 'number (100-5000)',
                    retry_count: 'number (0-3)',
                },
            ],
            overall_status: 'in_progress | completed | failed | compensated',
            started_at: 'ISO timestamp',
            completed_at: 'ISO timestamp or null',
            total_duration_ms: 'number',
            compensation_triggered: 'boolean',
            compensation_reason: 'error description or null',
        },
        constraints: [
            'Sagas should have 3-8 steps',
            'Failed sagas should have compensation_triggered = true',
            'Compensated steps should execute in reverse order',
            '80% of sagas should complete successfully',
        ],
    });
    // The event stream emitted while those sagas ran.
    const sagaEvents = await synth.generateEvents({
        count: 500,
        eventTypes: [
            'saga_started',
            'step_started',
            'step_completed',
            'step_failed',
            'compensation_started',
            'compensation_completed',
            'saga_completed',
            'saga_failed',
        ],
        schema: {
            event_id: 'UUID',
            saga_id: 'UUID (from sagas)',
            step_id: 'UUID or null',
            event_type: 'one of eventTypes',
            service: 'service name',
            payload: 'JSON object',
            timestamp: 'ISO timestamp',
        },
    });
    // Split sagas by terminal status for the summary report.
    const successfulSagas = sagas.data.filter((s: any) => s.overall_status === 'completed');
    const failedSagas = sagas.data.filter((s: any) => s.overall_status === 'failed');
    const compensatedSagas = sagas.data.filter((s: any) => s.overall_status === 'compensated');
    console.log('Saga Pattern Analysis:');
    console.log(`- Total sagas: ${sagas.data.length}`);
    console.log(`- Successful: ${successfulSagas.length} (${((successfulSagas.length / sagas.data.length) * 100).toFixed(1)}%)`);
    console.log(`- Failed: ${failedSagas.length}`);
    console.log(`- Compensated: ${compensatedSagas.length}`);
    // Average step count across all sagas.
    const avgSteps = sagas.data.reduce((sum: number, s: any) => sum + s.steps.length, 0) / sagas.data.length;
    // Bug fix: the previous code summed durations of every saga carrying a
    // completed_at timestamp (including failed/compensated ones) but divided
    // by the successful-saga count, skewing the average. Sum and denominator
    // now use the same finished-saga sample, with a zero guard so an empty
    // sample prints 0 instead of NaN/Infinity.
    const finishedSagas = sagas.data.filter((s: any) => s.completed_at);
    const avgDuration = finishedSagas.length > 0
        ? finishedSagas.reduce((sum: number, s: any) => sum + s.total_duration_ms, 0) / finishedSagas.length
        : 0;
    console.log(`\nTransaction Metrics:`);
    console.log(`- Average steps per saga: ${avgSteps.toFixed(1)}`);
    console.log(`- Average duration: ${avgDuration.toFixed(0)}ms`);
    console.log(`- Compensation rate: ${((compensatedSagas.length / sagas.data.length) * 100).toFixed(1)}%`);
    // Orchestration vs Choreography
    const orchestrated = sagas.data.filter((s: any) => s.orchestration_type === 'orchestration').length;
    const choreographed = sagas.data.filter((s: any) => s.orchestration_type === 'choreography').length;
    console.log(`\nOrchestration Style:`);
    console.log(`- Orchestration: ${orchestrated}`);
    console.log(`- Choreography: ${choreographed}`);
    // Integration with coordination frameworks
    console.log('\nIntegration Pattern:');
    console.log('// MassTransit Saga State Machine');
    console.log('class OrderSaga : MassTransitStateMachine<OrderState> { /* ... */ }');
    console.log('// Or custom orchestrator with agentic-synth test data');
    return { sagas, sagaEvents };
}
// ============================================================================
// Example 6: Stream Processing Pipeline
// ============================================================================
/**
* Generate stream processing pipeline data (Kafka Streams, Flink)
*/
export async function streamProcessingPipeline() {
    console.log('\n🌊 Example 6: Stream Processing Pipeline\n');
    const synth = createSynth({
        provider: 'gemini',
        apiKey: process.env.GEMINI_API_KEY || 'demo-key',
    });
    // A single pipeline topology document describing every stage.
    const pipeline = await synth.generateStructured({
        count: 1,
        schema: {
            pipeline_id: 'UUID',
            pipeline_name: 'descriptive name',
            stages: [
                {
                    stage_id: 'UUID',
                    stage_type: 'source | transform | aggregate | filter | sink',
                    stage_name: 'stage name',
                    parallelism: 'number (1-20)',
                    input_topics: ['array of topic names'],
                    output_topics: ['array of topic names'],
                    transformation: 'description of transformation',
                    windowing: {
                        type: 'tumbling | sliding | session | global',
                        size_ms: 'number (1000-300000)',
                        slide_ms: 'number or null',
                    },
                    state_store: 'store name or null',
                },
            ],
        },
    });
    // Operational metrics sampled every 30 seconds.
    const metrics = await synth.generateTimeSeries({
        count: 200,
        interval: '30s',
        metrics: [
            'records_in',
            'records_out',
            'records_filtered',
            'processing_latency_ms',
            'watermark_lag_ms',
            'cpu_usage',
            'memory_mb',
        ],
        trend: 'mixed',
    });
    // Emitted window aggregates, including late-arrival counts.
    const aggregations = await synth.generateStructured({
        count: 150,
        schema: {
            window_id: 'UUID',
            stage_id: 'UUID (from pipeline)',
            window_start: 'ISO timestamp',
            window_end: 'ISO timestamp',
            record_count: 'number (100-10000)',
            aggregate_values: {
                sum: 'number',
                avg: 'number',
                min: 'number',
                max: 'number',
                count: 'number',
            },
            emitted_at: 'ISO timestamp',
            late_arrivals: 'number (0-100)',
        },
    });
    console.log('Stream Processing Analysis:');
    // Robustness: optional chaining avoids a TypeError if the provider
    // returned no pipeline document.
    console.log(`- Pipeline stages: ${pipeline.data[0]?.stages?.length ?? 0}`);
    console.log(`- Metric points: ${metrics.data.length}`);
    console.log(`- Window aggregations: ${aggregations.data.length}`);
    // Average input/output records per sampling interval.
    const avgRecordsIn = metrics.data.reduce((sum: number, m: any) => sum + m.records_in, 0) / metrics.data.length;
    const avgRecordsOut = metrics.data.reduce((sum: number, m: any) => sum + m.records_out, 0) / metrics.data.length;
    // Guard the division: a zero input rate would otherwise print "NaN%".
    const filterRate = avgRecordsIn > 0
        ? ((avgRecordsIn - avgRecordsOut) / avgRecordsIn) * 100
        : 0;
    console.log(`\nThroughput:`);
    console.log(`- Input: ${avgRecordsIn.toFixed(0)} records/interval`);
    console.log(`- Output: ${avgRecordsOut.toFixed(0)} records/interval`);
    console.log(`- Filter rate: ${filterRate.toFixed(1)}%`);
    console.log('\nApache Flink Integration:');
    console.log('// Define stream processing job');
    console.log('env.addSource(kafkaSource).keyBy(...).window(...).aggregate(...).addSink(kafkaSink);');
    return { pipeline, metrics, aggregations };
}
// ============================================================================
// Run All Examples
// ============================================================================
/**
 * Run every distributed processing example in sequence, printing a divider
 * line between examples. Logs and rethrows the first error encountered so
 * callers can still observe the failure.
 *
 * @throws Rethrows whatever the first failing example threw.
 */
export async function runAllDistributedProcessingExamples() {
  console.log('🚀 Running All Distributed Processing Examples\n');
  const divider = '='.repeat(70);
  // Ordered list of examples; each is awaited so console output stays grouped.
  const examples = [
    mapReduceJobData,
    workerPoolSimulation,
    messageQueueScenarios,
    eventDrivenArchitecture,
    sagaPatternTransactions,
    streamProcessingPipeline,
  ];
  console.log(divider);
  try {
    for (const example of examples) {
      await example();
      console.log(divider);
    }
    console.log('\n✅ All distributed processing examples completed!\n');
  } catch (error: unknown) {
    // Strict mode types catch variables as `unknown`; narrow before using
    // `.message` instead of annotating the catch clause with `any`.
    const message = error instanceof Error ? error.message : String(error);
    console.error('❌ Error running examples:', message);
    throw error;
  }
}
// Run if executed directly: ESM equivalent of `require.main === module`,
// comparing this module's URL to the invoked script path.
// NOTE(review): the raw `file://${process.argv[1]}` comparison can fail on
// Windows drive paths or paths containing characters that URLs percent-encode;
// `url.pathToFileURL(process.argv[1]).href` would be more robust — confirm
// target platforms before relying on this guard.
if (import.meta.url === `file://${process.argv[1]}`) {
  runAllDistributedProcessingExamples().catch(console.error);
}