Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,cAAc,SAAS,CAAC;AACxB,cAAc,YAAY,CAAC;AAC3B,cAAc,aAAa,CAAC"}

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":";AAAA;;;;GAIG;;;;;;;;;;;;;;;;AAEH,0CAAwB;AACxB,6CAA2B;AAC3B,8CAA4B"}

View File

@@ -0,0 +1,9 @@
/**
 * Integration Context - Slack, Webhooks, Providers
 *
 * External system connectivity and protocol adapters.
 *
 * Barrel module: re-exports the public surface of each integration
 * sub-module so consumers can import from a single entry point.
 */
export * from './slack';
export * from './webhooks';
export * from './providers'; // LLM provider adapters (Anthropic, Google AI, ...)

View File

@@ -0,0 +1,48 @@
/**
* AnthropicProvider - Anthropic Claude LLM Integration
*
* Provides Claude AI models (claude-3-opus, claude-3-sonnet, claude-3-haiku)
* with streaming, tool use, and full API support.
*/
import type { LLMProvider, Message, CompletionOptions, StreamOptions, Completion, Token, ModelInfo } from './index.js';
/** Configuration for {@link AnthropicProvider}. */
export interface AnthropicConfig {
    /** Anthropic API key (sent as the `x-api-key` header). */
    apiKey: string;
    /** API origin; defaults to `https://api.anthropic.com`. */
    baseUrl?: string;
    /** Model id; defaults to `claude-3-5-sonnet-20241022`. */
    model?: string;
    /** Retry budget; defaults to 3. */
    maxRetries?: number;
    /** Request timeout in milliseconds; defaults to 60000. */
    timeout?: number;
}
/** Known Claude model identifiers supported by this provider. */
export type AnthropicModel = 'claude-opus-4-20250514' | 'claude-sonnet-4-20250514' | 'claude-3-5-sonnet-20241022' | 'claude-3-5-haiku-20241022' | 'claude-3-opus-20240229' | 'claude-3-sonnet-20240229' | 'claude-3-haiku-20240307';
/**
 * Anthropic Claude chat-completion provider implementing the shared
 * {@link LLMProvider} contract (complete / stream / countTokens / ...).
 */
export declare class AnthropicProvider implements LLMProvider {
    private readonly config;
    private readonly model;
    constructor(config: AnthropicConfig);
    /**
     * Complete a conversation (single non-streaming request).
     */
    complete(messages: Message[], options?: CompletionOptions): Promise<Completion>;
    /**
     * Stream a conversation token-by-token; returns the final Completion.
     */
    stream(messages: Message[], options?: StreamOptions): AsyncGenerator<Token, Completion, void>;
    /**
     * Count tokens in text (heuristic: roughly 4 characters per token).
     */
    countTokens(text: string): Promise<number>;
    /**
     * Get model info for the configured model.
     */
    getModel(): ModelInfo;
    /**
     * Check provider health (local check, no network round-trip).
     */
    isHealthy(): Promise<boolean>;
    private convertMessages;
    private convertTools;
    private convertResponse;
    private makeRequest;
    private makeStreamRequest;
}
/** Convenience factory for {@link AnthropicProvider}. */
export declare function createAnthropicProvider(config: AnthropicConfig): AnthropicProvider;
export default AnthropicProvider;
//# sourceMappingURL=AnthropicProvider.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"AnthropicProvider.d.ts","sourceRoot":"","sources":["AnthropicProvider.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,KAAK,EACV,WAAW,EACX,OAAO,EACP,iBAAiB,EACjB,aAAa,EACb,UAAU,EACV,KAAK,EACL,SAAS,EAGV,MAAM,YAAY,CAAC;AAMpB,MAAM,WAAW,eAAe;IAC9B,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,OAAO,CAAC,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,cAAc,GACtB,wBAAwB,GACxB,0BAA0B,GAC1B,4BAA4B,GAC5B,2BAA2B,GAC3B,wBAAwB,GACxB,0BAA0B,GAC1B,yBAAyB,CAAC;AAwG9B,qBAAa,iBAAkB,YAAW,WAAW;IACnD,OAAO,CAAC,QAAQ,CAAC,MAAM,CAA4B;IACnD,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAiB;gBAE3B,MAAM,EAAE,eAAe;IAWnC;;OAEG;IACG,QAAQ,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,iBAAiB,GAAG,OAAO,CAAC,UAAU,CAAC;IAcrF;;OAEG;IACI,MAAM,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,aAAa,GAAG,cAAc,CAAC,KAAK,EAAE,UAAU,EAAE,IAAI,CAAC;IA2CpG;;OAEG;IACG,WAAW,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAKhD;;OAEG;IACH,QAAQ,IAAI,SAAS;IAIrB;;OAEG;IACG,SAAS,IAAI,OAAO,CAAC,OAAO,CAAC;IAcnC,OAAO,CAAC,eAAe;IA0BvB,OAAO,CAAC,YAAY;IAQpB,OAAO,CAAC,eAAe;YA+BT,WAAW;YAsBV,iBAAiB;CAsDjC;AAMD,wBAAgB,uBAAuB,CAAC,MAAM,EAAE,eAAe,GAAG,iBAAiB,CAElF;AAED,eAAe,iBAAiB,CAAC"}

View File

@@ -0,0 +1,295 @@
"use strict";
/**
* AnthropicProvider - Anthropic Claude LLM Integration
*
* Provides Claude AI models (claude-3-opus, claude-3-sonnet, claude-3-haiku)
* with streaming, tool use, and full API support.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.AnthropicProvider = void 0;
exports.createAnthropicProvider = createAnthropicProvider;
// ============================================================================
// Model Info
// ============================================================================
// Static per-model limits, keyed by Claude model id:
// maxTokens = max output tokens per request, contextWindow = max input context.
const MODEL_INFO = {
    'claude-opus-4-20250514': {
        id: 'claude-opus-4-20250514',
        name: 'Claude Opus 4',
        maxTokens: 32768,
        contextWindow: 200000,
    },
    'claude-sonnet-4-20250514': {
        id: 'claude-sonnet-4-20250514',
        name: 'Claude Sonnet 4',
        maxTokens: 16384,
        contextWindow: 200000,
    },
    'claude-3-5-sonnet-20241022': {
        id: 'claude-3-5-sonnet-20241022',
        name: 'Claude 3.5 Sonnet',
        maxTokens: 8192,
        contextWindow: 200000,
    },
    'claude-3-5-haiku-20241022': {
        id: 'claude-3-5-haiku-20241022',
        name: 'Claude 3.5 Haiku',
        maxTokens: 8192,
        contextWindow: 200000,
    },
    'claude-3-opus-20240229': {
        id: 'claude-3-opus-20240229',
        name: 'Claude 3 Opus',
        maxTokens: 4096,
        contextWindow: 200000,
    },
    'claude-3-sonnet-20240229': {
        id: 'claude-3-sonnet-20240229',
        name: 'Claude 3 Sonnet',
        maxTokens: 4096,
        contextWindow: 200000,
    },
    'claude-3-haiku-20240307': {
        id: 'claude-3-haiku-20240307',
        name: 'Claude 3 Haiku',
        maxTokens: 4096,
        contextWindow: 200000,
    },
};
// ============================================================================
// AnthropicProvider Implementation
// ============================================================================
/**
 * Anthropic Claude chat-completion provider (tsc-compiled output of
 * AnthropicProvider.ts — prefer editing the TypeScript source; this file
 * carries a source map).
 */
class AnthropicProvider {
    /**
     * @param config - requires apiKey; baseUrl, model, maxRetries and
     * timeout receive the defaults below when omitted.
     */
    constructor(config) {
        this.config = {
            apiKey: config.apiKey,
            baseUrl: config.baseUrl ?? 'https://api.anthropic.com',
            model: config.model ?? 'claude-3-5-sonnet-20241022',
            // NOTE(review): maxRetries is stored but never consulted anywhere
            // in this class — confirm whether retry logic was intended.
            maxRetries: config.maxRetries ?? 3,
            timeout: config.timeout ?? 60000,
        };
        this.model = this.config.model;
    }
    /**
     * Complete a conversation with one non-streaming POST /v1/messages call.
     */
    async complete(messages, options) {
        const response = await this.makeRequest('/v1/messages', {
            model: this.model,
            // NOTE(review): MODEL_INFO[this.model] is undefined for unknown
            // model ids and would throw here — confirm inputs are validated.
            max_tokens: options?.maxTokens ?? MODEL_INFO[this.model].maxTokens,
            temperature: options?.temperature ?? 1.0,
            top_p: options?.topP,
            stop_sequences: options?.stopSequences,
            messages: this.convertMessages(messages),
            tools: options?.tools ? this.convertTools(options.tools) : undefined,
        });
        return this.convertResponse(response);
    }
    /**
     * Stream a conversation; yields text tokens and returns the assembled
     * completion (content, finish reason, usage) when the SSE stream ends.
     */
    async *stream(messages, options) {
        const response = await this.makeStreamRequest('/v1/messages', {
            model: this.model,
            max_tokens: options?.maxTokens ?? MODEL_INFO[this.model].maxTokens,
            temperature: options?.temperature ?? 1.0,
            top_p: options?.topP,
            stop_sequences: options?.stopSequences,
            messages: this.convertMessages(messages),
            tools: options?.tools ? this.convertTools(options.tools) : undefined,
            stream: true,
        });
        let fullContent = '';
        let inputTokens = 0;
        let outputTokens = 0;
        // NOTE(review): toolCalls is never populated in this loop, so streamed
        // tool_use blocks are dropped — verify against the TypeScript source.
        const toolCalls = [];
        let finishReason = 'stop';
        for await (const event of response) {
            if (event.type === 'content_block_delta' && event.delta?.text) {
                fullContent += event.delta.text;
                options?.onToken?.(event.delta.text);
                yield { type: 'text', text: event.delta.text };
            }
            else if (event.type === 'message_delta') {
                if (event.usage?.output_tokens) {
                    outputTokens = event.usage.output_tokens;
                }
                // NOTE(review): per the Anthropic streaming API, stop_reason on
                // message_delta events arrives under `delta`, not `message`, so
                // this branch likely never fires — verify.
                if (event.message?.stop_reason === 'tool_use') {
                    finishReason = 'tool_use';
                }
            }
            else if (event.type === 'message_start' && event.message?.usage) {
                inputTokens = event.message.usage.input_tokens;
            }
        }
        return {
            content: fullContent,
            finishReason,
            usage: { inputTokens, outputTokens },
            toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
        };
    }
    /**
     * Count tokens in text — heuristic (~4 characters per token), no API call.
     */
    async countTokens(text) {
        // Approximate token count (Claude uses ~4 chars per token on average)
        return Math.ceil(text.length / 4);
    }
    /**
     * Get model metadata; unknown ids fall back to claude-3-5-sonnet.
     */
    getModel() {
        return MODEL_INFO[this.model] ?? MODEL_INFO['claude-3-5-sonnet-20241022'];
    }
    /**
     * Liveness check; purely local (no network round-trip).
     */
    async isHealthy() {
        try {
            // Simple health check - try to count tokens
            await this.countTokens('health check');
            return true;
        }
        catch {
            return false;
        }
    }
    // ==========================================================================
    // Private Methods
    // ==========================================================================
    // Fold system messages into a single prompt prefix (the Messages API only
    // accepts user/assistant roles in `messages`) and map the rest through.
    convertMessages(messages) {
        const anthropicMessages = [];
        let systemPrompt = '';
        for (const msg of messages) {
            if (msg.role === 'system') {
                systemPrompt += (systemPrompt ? '\n' : '') + msg.content;
            }
            else {
                anthropicMessages.push({
                    role: msg.role,
                    content: msg.content,
                });
            }
        }
        // Prepend system prompt to first user message if exists
        if (systemPrompt && anthropicMessages.length > 0 && anthropicMessages[0].role === 'user') {
            const firstContent = anthropicMessages[0].content;
            if (typeof firstContent === 'string') {
                anthropicMessages[0].content = `${systemPrompt}\n\n${firstContent}`;
            }
        }
        return anthropicMessages;
    }
    // Map shared tool definitions to Anthropic's input_schema wire format.
    convertTools(tools) {
        return tools.map(tool => ({
            name: tool.name,
            description: tool.description,
            input_schema: tool.parameters,
        }));
    }
    // Flatten an API response (text + tool_use blocks) into a Completion.
    convertResponse(response) {
        let content = '';
        const toolCalls = [];
        for (const block of response.content) {
            if (block.type === 'text' && block.text) {
                content += block.text;
            }
            else if (block.type === 'tool_use' && block.id && block.name) {
                toolCalls.push({
                    id: block.id,
                    name: block.name,
                    input: block.input ?? {},
                });
            }
        }
        let finishReason = 'stop';
        if (response.stop_reason === 'max_tokens')
            finishReason = 'length';
        if (response.stop_reason === 'tool_use')
            finishReason = 'tool_use';
        return {
            content,
            finishReason,
            usage: {
                inputTokens: response.usage.input_tokens,
                outputTokens: response.usage.output_tokens,
            },
            toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
        };
    }
    // Single JSON POST with the configured timeout; throws on non-2xx.
    async makeRequest(endpoint, body) {
        const url = `${this.config.baseUrl}${endpoint}`;
        const response = await fetch(url, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'x-api-key': this.config.apiKey,
                'anthropic-version': '2023-06-01',
            },
            body: JSON.stringify(body),
            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`Anthropic API error: ${response.status} - ${error}`);
        }
        return response.json();
    }
    // SSE POST: parses each `data:` line as JSON and yields the event.
    // NOTE(review): unlike makeRequest, no timeout signal is applied here —
    // confirm whether that is intentional for long-lived streams.
    async *makeStreamRequest(endpoint, body) {
        const url = `${this.config.baseUrl}${endpoint}`;
        const response = await fetch(url, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'x-api-key': this.config.apiKey,
                'anthropic-version': '2023-06-01',
            },
            body: JSON.stringify(body),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`Anthropic API error: ${response.status} - ${error}`);
        }
        const reader = response.body?.getReader();
        if (!reader) {
            throw new Error('No response body');
        }
        const decoder = new TextDecoder();
        let buffer = '';
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                // Keep any partial trailing line for the next chunk.
                buffer = lines.pop() ?? '';
                for (const line of lines) {
                    if (line.startsWith('data: ')) {
                        const data = line.slice(6);
                        if (data === '[DONE]')
                            return;
                        try {
                            yield JSON.parse(data);
                        }
                        catch {
                            // Skip invalid JSON
                        }
                    }
                }
            }
        }
        finally {
            reader.releaseLock();
        }
    }
}
exports.AnthropicProvider = AnthropicProvider;
// ============================================================================
// Factory Function
// ============================================================================
/**
 * Factory helper: construct an AnthropicProvider from configuration.
 */
function createAnthropicProvider(config) {
    return new AnthropicProvider(config);
}
exports.default = AnthropicProvider;
//# sourceMappingURL=AnthropicProvider.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,404 @@
/**
* AnthropicProvider - Anthropic Claude LLM Integration
*
* Provides Claude AI models (claude-3-opus, claude-3-sonnet, claude-3-haiku)
* with streaming, tool use, and full API support.
*/
import type {
LLMProvider,
Message,
CompletionOptions,
StreamOptions,
Completion,
Token,
ModelInfo,
Tool,
ToolCall,
} from './index.js';
// ============================================================================
// Types
// ============================================================================
/** Configuration for {@link AnthropicProvider}. */
export interface AnthropicConfig {
  /** Anthropic API key (sent as the `x-api-key` header). */
  apiKey: string;
  /** API origin; defaults to `https://api.anthropic.com`. */
  baseUrl?: string;
  /** Model id; defaults to `claude-3-5-sonnet-20241022`. */
  model?: string;
  /** Retry budget; defaults to 3. */
  maxRetries?: number;
  /** Request timeout in milliseconds; defaults to 60000. */
  timeout?: number;
}
/**
 * Known Claude model identifiers supported by this provider
 * (see MODEL_INFO for per-model output/context limits).
 */
export type AnthropicModel =
  | 'claude-opus-4-20250514'
  | 'claude-sonnet-4-20250514'
  | 'claude-3-5-sonnet-20241022'
  | 'claude-3-5-haiku-20241022'
  | 'claude-3-opus-20240229'
  | 'claude-3-sonnet-20240229'
  | 'claude-3-haiku-20240307';
/** A single turn in the Anthropic Messages API request format. */
interface AnthropicMessage {
  role: 'user' | 'assistant';
  content: string | AnthropicContent[];
}
/** One content block in a request/response (text or tool interaction). */
interface AnthropicContent {
  type: 'text' | 'tool_use' | 'tool_result';
  /** Present for 'text' blocks. */
  text?: string;
  /** Tool-use id; present for 'tool_use' blocks. */
  id?: string;
  /** Tool name; present for 'tool_use' blocks. */
  name?: string;
  /** Tool arguments; present for 'tool_use' blocks. */
  input?: Record<string, unknown>;
}
/** Tool definition in Anthropic's wire format (JSON-schema input). */
interface AnthropicTool {
  name: string;
  description: string;
  input_schema: Record<string, unknown>;
}
/** Non-streaming response body of POST /v1/messages (fields used here). */
interface AnthropicResponse {
  id: string;
  type: 'message';
  role: 'assistant';
  content: AnthropicContent[];
  model: string;
  /** Why generation stopped; mapped onto Completion['finishReason']. */
  stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use';
  stop_sequence?: string;
  /** Token accounting reported by the API. */
  usage: {
    input_tokens: number;
    output_tokens: number;
  };
}
/**
 * One server-sent event from the Anthropic streaming Messages API
 * (message_start, content_block_start, content_block_delta,
 * message_delta, message_stop, ...). Only the fields consumed by this
 * provider are modeled.
 */
interface AnthropicStreamEvent {
  /** Event name, e.g. 'content_block_delta' or 'message_delta'. */
  type: string;
  /** Content-block index for content_block_* events. */
  index?: number;
  delta?: {
    type: string;
    /** Incremental text for text_delta payloads. */
    text?: string;
    /** Incremental tool-input JSON for input_json_delta payloads. */
    partial_json?: string;
    /**
     * Present on `message_delta` events: the final stop reason
     * ('end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use').
     * Previously unrepresentable here, which is why consumers looked
     * at `message.stop_reason` instead — a field message_delta events
     * do not carry.
     */
    stop_reason?: string;
    /** Present on `message_delta` events when a stop sequence matched. */
    stop_sequence?: string | null;
  };
  /** Present on content_block_start events. */
  content_block?: AnthropicContent;
  /** Present on message_start events (full message skeleton). */
  message?: AnthropicResponse;
  /** Present on message_delta events. */
  usage?: {
    input_tokens?: number;
    output_tokens?: number;
  };
}
// ============================================================================
// Model Info
// ============================================================================
// Static per-model limits, keyed by Claude model id:
// maxTokens = max output tokens per request, contextWindow = max input context.
const MODEL_INFO: Record<AnthropicModel, ModelInfo> = {
  'claude-opus-4-20250514': {
    id: 'claude-opus-4-20250514',
    name: 'Claude Opus 4',
    maxTokens: 32768,
    contextWindow: 200000,
  },
  'claude-sonnet-4-20250514': {
    id: 'claude-sonnet-4-20250514',
    name: 'Claude Sonnet 4',
    maxTokens: 16384,
    contextWindow: 200000,
  },
  'claude-3-5-sonnet-20241022': {
    id: 'claude-3-5-sonnet-20241022',
    name: 'Claude 3.5 Sonnet',
    maxTokens: 8192,
    contextWindow: 200000,
  },
  'claude-3-5-haiku-20241022': {
    id: 'claude-3-5-haiku-20241022',
    name: 'Claude 3.5 Haiku',
    maxTokens: 8192,
    contextWindow: 200000,
  },
  'claude-3-opus-20240229': {
    id: 'claude-3-opus-20240229',
    name: 'Claude 3 Opus',
    maxTokens: 4096,
    contextWindow: 200000,
  },
  'claude-3-sonnet-20240229': {
    id: 'claude-3-sonnet-20240229',
    name: 'Claude 3 Sonnet',
    maxTokens: 4096,
    contextWindow: 200000,
  },
  'claude-3-haiku-20240307': {
    id: 'claude-3-haiku-20240307',
    name: 'Claude 3 Haiku',
    maxTokens: 4096,
    contextWindow: 200000,
  },
};
// ============================================================================
// AnthropicProvider Implementation
// ============================================================================
/**
 * Anthropic Claude chat-completion provider.
 *
 * Implements the shared LLMProvider contract against the Anthropic Messages
 * API (`POST {baseUrl}/v1/messages`), including SSE streaming and tool use.
 */
export class AnthropicProvider implements LLMProvider {
  private readonly config: Required<AnthropicConfig>;
  private readonly model: AnthropicModel;

  constructor(config: AnthropicConfig) {
    this.config = {
      apiKey: config.apiKey,
      baseUrl: config.baseUrl ?? 'https://api.anthropic.com',
      model: config.model ?? 'claude-3-5-sonnet-20241022',
      maxRetries: config.maxRetries ?? 3,
      timeout: config.timeout ?? 60000,
    };
    // `model` is user-supplied; treat it as an AnthropicModel but fall back
    // gracefully in getModel() when it is not a known id.
    this.model = this.config.model as AnthropicModel;
  }

  /**
   * Complete a conversation with a single non-streaming request.
   *
   * @param messages - conversation history; system messages are folded into
   *   the first user message (see convertMessages).
   * @param options - sampling parameters and tool definitions.
   * @returns the assistant completion with usage and any tool calls.
   * @throws Error when the API still fails after all retries.
   */
  async complete(messages: Message[], options?: CompletionOptions): Promise<Completion> {
    const response = await this.makeRequest<AnthropicResponse>('/v1/messages', {
      model: this.model,
      // getModel() falls back to known limits when this.model is not in
      // MODEL_INFO, avoiding a TypeError on unknown model ids.
      max_tokens: options?.maxTokens ?? this.getModel().maxTokens,
      temperature: options?.temperature ?? 1.0,
      top_p: options?.topP,
      stop_sequences: options?.stopSequences,
      messages: this.convertMessages(messages),
      tools: options?.tools ? this.convertTools(options.tools) : undefined,
    });
    return this.convertResponse(response);
  }

  /**
   * Stream a conversation token-by-token.
   *
   * Yields text tokens as they arrive and returns the assembled Completion
   * (content, finish reason, usage, tool calls) when the stream ends.
   */
  async *stream(messages: Message[], options?: StreamOptions): AsyncGenerator<Token, Completion, void> {
    // Calling an async generator returns the generator synchronously; the
    // previous `await` here was a no-op.
    const events = this.makeStreamRequest('/v1/messages', {
      model: this.model,
      max_tokens: options?.maxTokens ?? this.getModel().maxTokens,
      temperature: options?.temperature ?? 1.0,
      top_p: options?.topP,
      stop_sequences: options?.stopSequences,
      messages: this.convertMessages(messages),
      tools: options?.tools ? this.convertTools(options.tools) : undefined,
      stream: true,
    });

    let fullContent = '';
    let inputTokens = 0;
    let outputTokens = 0;
    let finishReason: Completion['finishReason'] = 'stop';
    // tool_use blocks are announced by content_block_start events and their
    // input arrives as partial_json fragments; accumulate them by block index.
    const pendingTools = new Map<number, { id: string; name: string; json: string }>();

    for await (const event of events) {
      if (event.type === 'message_start' && event.message?.usage) {
        inputTokens = event.message.usage.input_tokens;
      } else if (event.type === 'content_block_start') {
        const block = event.content_block;
        if (block?.type === 'tool_use' && block.id && block.name) {
          pendingTools.set(event.index ?? pendingTools.size, {
            id: block.id,
            name: block.name,
            json: '',
          });
        }
      } else if (event.type === 'content_block_delta') {
        if (event.delta?.text) {
          fullContent += event.delta.text;
          options?.onToken?.(event.delta.text);
          yield { type: 'text', text: event.delta.text };
        } else if (event.delta?.partial_json !== undefined && event.index !== undefined) {
          const pending = pendingTools.get(event.index);
          if (pending) pending.json += event.delta.partial_json;
        }
      } else if (event.type === 'message_delta') {
        if (event.usage?.output_tokens) {
          outputTokens = event.usage.output_tokens;
        }
        // Fix: the final stop_reason arrives on the message_delta event's
        // `delta` payload; message_delta events carry no `message` field, so
        // the previous `event.message?.stop_reason` check could never fire.
        const stop =
          (event.delta as { stop_reason?: string } | undefined)?.stop_reason ??
          event.message?.stop_reason;
        if (stop === 'tool_use') {
          finishReason = 'tool_use';
        } else if (stop === 'max_tokens') {
          finishReason = 'length';
        }
      }
    }

    // Materialize accumulated tool calls (previously this array was declared
    // but never populated, silently dropping streamed tool use).
    const toolCalls: ToolCall[] = [];
    for (const { id, name, json } of pendingTools.values()) {
      let input: Record<string, unknown> = {};
      if (json) {
        try {
          input = JSON.parse(json) as Record<string, unknown>;
        } catch {
          // Malformed accumulated JSON — surface the call with empty input
          // rather than dropping it entirely.
        }
      }
      toolCalls.push({ id, name, input });
    }

    return {
      content: fullContent,
      finishReason,
      usage: { inputTokens, outputTokens },
      toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    };
  }

  /**
   * Count tokens in text.
   *
   * Heuristic only: Claude averages roughly 4 characters per token; no API
   * call is made.
   */
  async countTokens(text: string): Promise<number> {
    return Math.ceil(text.length / 4);
  }

  /**
   * Get metadata for the configured model, falling back to the
   * claude-3-5-sonnet entry for unknown model ids.
   */
  getModel(): ModelInfo {
    return MODEL_INFO[this.model] ?? MODEL_INFO['claude-3-5-sonnet-20241022'];
  }

  /**
   * Cheap liveness check; does not hit the network, so it only verifies the
   * provider object itself is operational.
   */
  async isHealthy(): Promise<boolean> {
    try {
      await this.countTokens('health check');
      return true;
    } catch {
      return false;
    }
  }

  // ==========================================================================
  // Private Methods
  // ==========================================================================

  /**
   * Convert shared Message objects to Anthropic's schema. System messages
   * are concatenated and prepended to the first user message, because
   * /v1/messages only accepts user/assistant roles in `messages`.
   */
  private convertMessages(messages: Message[]): AnthropicMessage[] {
    const anthropicMessages: AnthropicMessage[] = [];
    let systemPrompt = '';
    for (const msg of messages) {
      if (msg.role === 'system') {
        systemPrompt += (systemPrompt ? '\n' : '') + msg.content;
      } else {
        anthropicMessages.push({
          role: msg.role,
          content: msg.content,
        });
      }
    }
    // Prepend the collected system prompt to the first user message, if any.
    if (systemPrompt && anthropicMessages.length > 0 && anthropicMessages[0].role === 'user') {
      const firstContent = anthropicMessages[0].content;
      if (typeof firstContent === 'string') {
        anthropicMessages[0].content = `${systemPrompt}\n\n${firstContent}`;
      }
    }
    return anthropicMessages;
  }

  /** Map shared Tool definitions to Anthropic's input_schema wire format. */
  private convertTools(tools: Tool[]): AnthropicTool[] {
    return tools.map(tool => ({
      name: tool.name,
      description: tool.description,
      input_schema: tool.parameters,
    }));
  }

  /** Flatten an API response (text + tool_use blocks) into a Completion. */
  private convertResponse(response: AnthropicResponse): Completion {
    let content = '';
    const toolCalls: ToolCall[] = [];
    for (const block of response.content) {
      if (block.type === 'text' && block.text) {
        content += block.text;
      } else if (block.type === 'tool_use' && block.id && block.name) {
        toolCalls.push({
          id: block.id,
          name: block.name,
          input: block.input ?? {},
        });
      }
    }
    let finishReason: Completion['finishReason'] = 'stop';
    if (response.stop_reason === 'max_tokens') finishReason = 'length';
    if (response.stop_reason === 'tool_use') finishReason = 'tool_use';
    return {
      content,
      finishReason,
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
      },
      toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    };
  }

  /**
   * POST a JSON body and parse the JSON response.
   *
   * Retries failed attempts (network errors and non-2xx responses) up to
   * config.maxRetries times with linear backoff, mirroring
   * GoogleAIProvider — previously `maxRetries` was accepted in the config
   * but never used.
   *
   * @throws the last encountered Error once the retry budget is exhausted.
   */
  private async makeRequest<T>(endpoint: string, body: Record<string, unknown>): Promise<T> {
    const url = `${this.config.baseUrl}${endpoint}`;
    let lastError: Error | null = null;
    for (let attempt = 0; attempt < this.config.maxRetries; attempt++) {
      try {
        const response = await fetch(url, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'x-api-key': this.config.apiKey,
            'anthropic-version': '2023-06-01',
          },
          body: JSON.stringify(body),
          signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
          const error = await response.text();
          throw new Error(`Anthropic API error: ${response.status} - ${error}`);
        }
        return (await response.json()) as T;
      } catch (err) {
        lastError = err instanceof Error ? err : new Error(String(err));
        if (attempt < this.config.maxRetries - 1) {
          // Linear backoff between attempts (1s, 2s, ...).
          await new Promise(resolve => setTimeout(resolve, 1000 * (attempt + 1)));
        }
      }
    }
    throw lastError ?? new Error('Anthropic request failed');
  }

  /**
   * POST a streaming request and yield parsed SSE events.
   *
   * Each `data: {...}` line is parsed as one AnthropicStreamEvent; invalid
   * JSON lines are skipped. Deliberately no AbortSignal.timeout here: it
   * would abort long-lived streams mid-flight.
   */
  private async *makeStreamRequest(
    endpoint: string,
    body: Record<string, unknown>
  ): AsyncGenerator<AnthropicStreamEvent, void, void> {
    const url = `${this.config.baseUrl}${endpoint}`;
    const response = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': this.config.apiKey,
        'anthropic-version': '2023-06-01',
      },
      body: JSON.stringify(body),
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error('No response body');
    }
    const decoder = new TextDecoder();
    let buffer = '';
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        // Keep any partial trailing line for the next chunk.
        buffer = lines.pop() ?? '';
        for (const line of lines) {
          if (line.startsWith('data: ')) {
            const data = line.slice(6);
            // Defensive: Anthropic terminates with message_stop, not [DONE].
            if (data === '[DONE]') return;
            try {
              yield JSON.parse(data) as AnthropicStreamEvent;
            } catch {
              // Skip invalid JSON
            }
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }
}
// ============================================================================
// Factory Function
// ============================================================================
/**
 * Convenience factory: construct an AnthropicProvider from configuration.
 */
export function createAnthropicProvider(config: AnthropicConfig): AnthropicProvider {
  return new AnthropicProvider(config);
}
export default AnthropicProvider;

View File

@@ -0,0 +1,32 @@
/**
* GoogleAIProvider - Google AI (Gemini) LLM Integration
*
* Provides direct access to Google's Gemini models using the Google AI API.
* Supports both API key authentication and Google Cloud default credentials.
*/
import type { LLMProvider, Message, CompletionOptions, StreamOptions, Completion, Token, ModelInfo } from './index.js';
/** Configuration for {@link GoogleAIProvider}. */
export interface GoogleAIConfig {
    /** API key; falls back to GOOGLE_AI_API_KEY / GEMINI_API_KEY env vars. */
    apiKey?: string;
    /** Google Cloud project id; falls back to GOOGLE_CLOUD_PROJECT. */
    projectId?: string;
    /** Cloud region; defaults to `us-central1`. */
    location?: string;
    /** Model id; defaults to `gemini-2.0-flash`. */
    model?: string;
    /** Retry attempts for generateContent; defaults to 3. */
    maxRetries?: number;
    /** Request timeout in milliseconds; defaults to 60000. */
    timeout?: number;
}
/**
 * Known Gemini model ids; the trailing `string` member keeps the union open
 * for new or custom model names.
 */
export type GoogleAIModel = 'gemini-3-pro-preview' | 'gemini-3-flash-preview' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-image' | 'gemini-2.0-flash' | 'gemini-2.0-flash-lite' | 'gemini-1.5-pro' | 'gemini-1.5-flash' | 'gemini-1.5-flash-8b' | string;
/**
 * Google AI (Gemini) chat-completion provider implementing the shared
 * {@link LLMProvider} contract (complete / stream / countTokens / ...).
 */
export declare class GoogleAIProvider implements LLMProvider {
    private readonly config;
    private readonly baseUrl;
    private readonly modelId;
    constructor(config: GoogleAIConfig);
    /** Non-streaming completion via models/{model}:generateContent. */
    complete(messages: Message[], options?: CompletionOptions): Promise<Completion>;
    /** Streaming completion (SSE); yields tokens, returns the Completion. */
    stream(messages: Message[], options?: StreamOptions): AsyncGenerator<Token, Completion, void>;
    /** Heuristic token count (~4 characters per token); no API call. */
    countTokens(text: string): Promise<number>;
    /** Metadata for the configured model. */
    getModel(): ModelInfo;
    /** Local health check (verifies an API key is configured). */
    isHealthy(): Promise<boolean>;
    private convertMessages;
    private extractSystemInstruction;
    private mapFinishReason;
}
/** Convenience factory for {@link GoogleAIProvider}. */
export declare function createGoogleAIProvider(config: GoogleAIConfig): LLMProvider;
//# sourceMappingURL=GoogleAIProvider.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"GoogleAIProvider.d.ts","sourceRoot":"","sources":["GoogleAIProvider.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,KAAK,EACV,WAAW,EACX,OAAO,EACP,iBAAiB,EACjB,aAAa,EACb,UAAU,EACV,KAAK,EACL,SAAS,EAGV,MAAM,YAAY,CAAC;AAMpB,MAAM,WAAW,cAAc;IAC7B,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,OAAO,CAAC,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,aAAa,GAErB,sBAAsB,GACtB,wBAAwB,GAExB,gBAAgB,GAChB,kBAAkB,GAClB,uBAAuB,GACvB,wBAAwB,GAExB,kBAAkB,GAClB,uBAAuB,GAEvB,gBAAgB,GAChB,kBAAkB,GAClB,qBAAqB,GACrB,MAAM,CAAC;AA2GX,qBAAa,gBAAiB,YAAW,WAAW;IAClD,OAAO,CAAC,QAAQ,CAAC,MAAM,CAA2B;IAClD,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAS;IACjC,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAS;gBAErB,MAAM,EAAE,cAAc;IAa5B,QAAQ,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,iBAAiB,GAAG,OAAO,CAAC,UAAU,CAAC;IA4E9E,MAAM,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,aAAa,GAAG,cAAc,CAAC,KAAK,EAAE,UAAU,EAAE,IAAI,CAAC;IAqF9F,WAAW,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAKhD,QAAQ,IAAI,SAAS;IASf,SAAS,IAAI,OAAO,CAAC,OAAO,CAAC;IASnC,OAAO,CAAC,eAAe;IASvB,OAAO,CAAC,wBAAwB;IAKhC,OAAO,CAAC,eAAe;CAaxB;AAMD,wBAAgB,sBAAsB,CAAC,MAAM,EAAE,cAAc,GAAG,WAAW,CAE1E"}

View File

@@ -0,0 +1,296 @@
"use strict";
/**
* GoogleAIProvider - Google AI (Gemini) LLM Integration
*
* Provides direct access to Google's Gemini models using the Google AI API.
* Supports both API key authentication and Google Cloud default credentials.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.GoogleAIProvider = void 0;
exports.createGoogleAIProvider = createGoogleAIProvider;
// ============================================================================
// Model Info
// ============================================================================
// Static per-model limits, keyed by Gemini model id:
// maxTokens = max output tokens per request, contextWindow = max input context.
const MODEL_INFO = {
    // Gemini 3.x models (preview)
    'gemini-3-pro-preview': {
        id: 'gemini-3-pro-preview',
        name: 'Gemini 3 Pro Preview',
        maxTokens: 65536,
        contextWindow: 1000000,
    },
    'gemini-3-flash-preview': {
        id: 'gemini-3-flash-preview',
        name: 'Gemini 3 Flash Preview',
        maxTokens: 65536,
        contextWindow: 1000000,
    },
    // Gemini 2.5 models (stable)
    'gemini-2.5-pro': {
        id: 'gemini-2.5-pro',
        name: 'Gemini 2.5 Pro',
        maxTokens: 65536,
        contextWindow: 1000000,
    },
    'gemini-2.5-flash': {
        id: 'gemini-2.5-flash',
        name: 'Gemini 2.5 Flash',
        maxTokens: 65536,
        contextWindow: 1000000,
    },
    'gemini-2.5-flash-lite': {
        id: 'gemini-2.5-flash-lite',
        name: 'Gemini 2.5 Flash Lite',
        maxTokens: 65536,
        contextWindow: 1000000,
    },
    'gemini-2.5-flash-image': {
        id: 'gemini-2.5-flash-image',
        name: 'Gemini 2.5 Flash Image',
        maxTokens: 65536,
        contextWindow: 1000000,
    },
    // Gemini 2.0 models (deprecated March 2026)
    'gemini-2.0-flash': {
        id: 'gemini-2.0-flash',
        name: 'Gemini 2.0 Flash',
        maxTokens: 8192,
        contextWindow: 1000000,
    },
    'gemini-2.0-flash-lite': {
        id: 'gemini-2.0-flash-lite',
        name: 'Gemini 2.0 Flash Lite',
        maxTokens: 8192,
        contextWindow: 1000000,
    },
    // Gemini 1.5 models
    'gemini-1.5-pro': {
        id: 'gemini-1.5-pro',
        name: 'Gemini 1.5 Pro',
        maxTokens: 8192,
        contextWindow: 2000000,
    },
    'gemini-1.5-flash': {
        id: 'gemini-1.5-flash',
        name: 'Gemini 1.5 Flash',
        maxTokens: 8192,
        contextWindow: 1000000,
    },
    'gemini-1.5-flash-8b': {
        id: 'gemini-1.5-flash-8b',
        name: 'Gemini 1.5 Flash 8B',
        maxTokens: 8192,
        contextWindow: 1000000,
    },
};
// ============================================================================
// Provider Implementation
// ============================================================================
/**
 * Google AI (Gemini) chat-completion provider (tsc-compiled output of
 * GoogleAIProvider.ts — prefer editing the TypeScript source).
 */
class GoogleAIProvider {
    /**
     * @param config - all fields optional; apiKey falls back to the
     * GOOGLE_AI_API_KEY / GEMINI_API_KEY environment variables.
     */
    constructor(config) {
        this.config = {
            apiKey: config.apiKey || process.env.GOOGLE_AI_API_KEY || process.env.GEMINI_API_KEY || '',
            projectId: config.projectId || process.env.GOOGLE_CLOUD_PROJECT || '',
            location: config.location || 'us-central1',
            model: config.model || 'gemini-2.0-flash',
            maxRetries: config.maxRetries ?? 3,
            timeout: config.timeout ?? 60000,
        };
        this.modelId = this.config.model;
        this.baseUrl = 'https://generativelanguage.googleapis.com/v1beta';
    }
    /**
     * Non-streaming completion via models/{model}:generateContent.
     * Retries up to maxRetries times with linear backoff.
     */
    async complete(messages, options) {
        const geminiMessages = this.convertMessages(messages);
        const systemInstruction = this.extractSystemInstruction(messages);
        const requestBody = {
            contents: geminiMessages,
            generationConfig: {
                temperature: options?.temperature ?? 0.7,
                maxOutputTokens: options?.maxTokens ?? 4096,
                topP: options?.topP ?? 0.95,
            },
        };
        if (systemInstruction) {
            requestBody.systemInstruction = {
                parts: [{ text: systemInstruction }],
            };
        }
        if (options?.tools && options.tools.length > 0) {
            requestBody.tools = [{
                    functionDeclarations: options.tools.map(tool => ({
                        name: tool.name,
                        description: tool.description,
                        parameters: tool.parameters,
                    })),
                }];
        }
        // NOTE(review): the API key travels as a query parameter, so it can
        // leak into logs/proxies; consider the x-goog-api-key header instead.
        const url = `${this.baseUrl}/models/${this.modelId}:generateContent?key=${this.config.apiKey}`;
        let lastError = null;
        for (let attempt = 0; attempt < this.config.maxRetries; attempt++) {
            try {
                const response = await fetch(url, {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify(requestBody),
                    signal: AbortSignal.timeout(this.config.timeout),
                });
                if (!response.ok) {
                    const errorText = await response.text();
                    throw new Error(`Google AI API error (${response.status}): ${errorText}`);
                }
                const data = await response.json();
                if (!data.candidates || data.candidates.length === 0) {
                    throw new Error('No response from Google AI');
                }
                const candidate = data.candidates[0];
                // NOTE(review): non-text parts (e.g. functionCall) would map to
                // undefined here and join as "undefined" — verify parts shape.
                const content = candidate.content.parts.map(p => p.text).join('');
                const finishReason = this.mapFinishReason(candidate.finishReason);
                return {
                    content,
                    finishReason,
                    usage: {
                        inputTokens: data.usageMetadata?.promptTokenCount ?? 0,
                        outputTokens: data.usageMetadata?.candidatesTokenCount ?? 0,
                    },
                };
            }
            catch (error) {
                lastError = error;
                // Linear backoff before the next attempt (1s, 2s, ...).
                if (attempt < this.config.maxRetries - 1) {
                    await new Promise(resolve => setTimeout(resolve, 1000 * (attempt + 1)));
                }
            }
        }
        throw lastError || new Error('Failed to complete request');
    }
    /**
     * Streaming completion via models/{model}:streamGenerateContent (SSE).
     * Yields text tokens; returns the assembled completion at stream end.
     */
    async *stream(messages, options) {
        const geminiMessages = this.convertMessages(messages);
        const systemInstruction = this.extractSystemInstruction(messages);
        const requestBody = {
            contents: geminiMessages,
            generationConfig: {
                temperature: options?.temperature ?? 0.7,
                maxOutputTokens: options?.maxTokens ?? 4096,
            },
        };
        if (systemInstruction) {
            requestBody.systemInstruction = {
                parts: [{ text: systemInstruction }],
            };
        }
        const url = `${this.baseUrl}/models/${this.modelId}:streamGenerateContent?key=${this.config.apiKey}&alt=sse`;
        const response = await fetch(url, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(requestBody),
        });
        if (!response.ok) {
            const errorText = await response.text();
            throw new Error(`Google AI API error (${response.status}): ${errorText}`);
        }
        const reader = response.body?.getReader();
        if (!reader) {
            throw new Error('No response body');
        }
        const decoder = new TextDecoder();
        let buffer = '';
        let fullContent = '';
        let inputTokens = 0;
        let outputTokens = 0;
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                // Keep a partial trailing line in the buffer for the next chunk.
                buffer = lines.pop() || '';
                for (const line of lines) {
                    if (line.startsWith('data: ')) {
                        const data = line.slice(6);
                        if (data === '[DONE]')
                            continue;
                        try {
                            const json = JSON.parse(data);
                            if (json.candidates?.[0]?.content?.parts?.[0]?.text) {
                                const text = json.candidates[0].content.parts[0].text;
                                fullContent += text;
                                options?.onToken?.(text);
                                yield { type: 'text', text };
                            }
                            // NOTE(review): these counts may be undefined on
                            // intermediate chunks — confirm the final chunk
                            // always carries usageMetadata.
                            if (json.usageMetadata) {
                                inputTokens = json.usageMetadata.promptTokenCount;
                                outputTokens = json.usageMetadata.candidatesTokenCount;
                            }
                        }
                        catch {
                            // Skip invalid JSON
                        }
                    }
                }
            }
        }
        finally {
            reader.releaseLock();
        }
        return {
            content: fullContent,
            finishReason: 'stop',
            usage: { inputTokens, outputTokens },
        };
    }
    /**
     * Count tokens — heuristic (~4 characters per token), no API call.
     */
    async countTokens(text) {
        // Approximate token count (Gemini uses ~4 chars per token on average)
        return Math.ceil(text.length / 4);
    }
    /**
     * Model metadata; unknown ids get generic 8192/1M-token limits.
     */
    getModel() {
        return MODEL_INFO[this.modelId] ?? {
            id: this.modelId,
            name: this.modelId,
            maxTokens: 8192,
            contextWindow: 1000000,
        };
    }
    /**
     * Health check: true only when an API key is configured; no network call.
     */
    async isHealthy() {
        try {
            await this.countTokens('health check');
            return !!this.config.apiKey;
        }
        catch {
            return false;
        }
    }
    // Drop system messages (sent separately as systemInstruction) and map
    // assistant -> model for Gemini's role names.
    convertMessages(messages) {
        return messages
            .filter(m => m.role !== 'system')
            .map(m => ({
            role: m.role === 'assistant' ? 'model' : 'user',
            parts: [{ text: m.content }],
        }));
    }
    // Content of the first system message, if any (later ones are ignored).
    extractSystemInstruction(messages) {
        const systemMessage = messages.find(m => m.role === 'system');
        return systemMessage?.content || null;
    }
    // Map Gemini finish reasons onto the shared Completion vocabulary;
    // anything unrecognized is treated as a normal stop.
    mapFinishReason(reason) {
        switch (reason) {
            case 'STOP':
            case 'END_TURN':
                return 'stop';
            case 'MAX_TOKENS':
                return 'length';
            case 'TOOL_CALL':
                return 'tool_use';
            default:
                return 'stop';
        }
    }
}
exports.GoogleAIProvider = GoogleAIProvider;
// ============================================================================
// Factory Function
// ============================================================================
/**
 * Factory helper: construct a GoogleAIProvider from configuration.
 */
function createGoogleAIProvider(config) {
    return new GoogleAIProvider(config);
}
//# sourceMappingURL=GoogleAIProvider.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,393 @@
/**
* GoogleAIProvider - Google AI (Gemini) LLM Integration
*
* Provides direct access to Google's Gemini models using the Google AI API.
* Supports both API key authentication and Google Cloud default credentials.
*/
import type {
LLMProvider,
Message,
CompletionOptions,
StreamOptions,
Completion,
Token,
ModelInfo,
Tool,
ToolCall,
} from './index.js';
// ============================================================================
// Types
// ============================================================================
/** Configuration for GoogleAIProvider; unset fields use env vars / defaults. */
export interface GoogleAIConfig {
  // Google AI API key; constructor falls back to GOOGLE_AI_API_KEY / GEMINI_API_KEY.
  apiKey?: string;
  // Google Cloud project (falls back to GOOGLE_CLOUD_PROJECT).
  // NOTE(review): not referenced by the REST calls in this file — confirm intended use.
  projectId?: string;
  // Cloud region, default 'us-central1'.
  // NOTE(review): also unused by the visible REST calls.
  location?: string;
  // Model id, default 'gemini-2.0-flash'.
  model?: string;
  // Retry attempts in complete(), default 3.
  maxRetries?: number;
  // Per-request timeout in milliseconds, default 60000.
  timeout?: number;
}
/** Known Gemini model ids; the trailing `| string` keeps the type open. */
export type GoogleAIModel =
  // Gemini 3.x models (preview)
  | 'gemini-3-pro-preview'
  | 'gemini-3-flash-preview'
  // Gemini 2.5 models (stable)
  | 'gemini-2.5-pro'
  | 'gemini-2.5-flash'
  | 'gemini-2.5-flash-lite'
  | 'gemini-2.5-flash-image'
  // Gemini 2.0 models (deprecated March 2026)
  | 'gemini-2.0-flash'
  | 'gemini-2.0-flash-lite'
  // Gemini 1.5 models
  | 'gemini-1.5-pro'
  | 'gemini-1.5-flash'
  | 'gemini-1.5-flash-8b'
  // Any other model id is passed through to the API unchanged.
  | string;
// Wire format of one conversation turn for the Gemini REST API.
interface GeminiMessage {
  role: 'user' | 'model';
  parts: { text: string }[];
}
// Shape of a generateContent / streamGenerateContent response body.
interface GeminiResponse {
  candidates: {
    content: {
      parts: { text: string }[];
      role: string;
    };
    finishReason: string;
    safetyRatings?: Array<{
      category: string;
      probability: string;
    }>;
  }[];
  // Token accounting; optional (callers check for its presence).
  usageMetadata?: {
    promptTokenCount: number;
    candidatesTokenCount: number;
    totalTokenCount: number;
  };
}
// ============================================================================
// Model Info
// ============================================================================
/**
 * Static capability registry (max output tokens / context window) for the
 * Gemini models this provider knows about. getModel() falls back to
 * conservative defaults for model ids not listed here.
 */
const MODEL_INFO: Record<string, ModelInfo> = {
  // Gemini 3.x models (preview)
  'gemini-3-pro-preview': {
    id: 'gemini-3-pro-preview',
    name: 'Gemini 3 Pro Preview',
    maxTokens: 65536,
    contextWindow: 1000000,
  },
  'gemini-3-flash-preview': {
    id: 'gemini-3-flash-preview',
    name: 'Gemini 3 Flash Preview',
    maxTokens: 65536,
    contextWindow: 1000000,
  },
  // Gemini 2.5 models (stable)
  'gemini-2.5-pro': {
    id: 'gemini-2.5-pro',
    name: 'Gemini 2.5 Pro',
    maxTokens: 65536,
    contextWindow: 1000000,
  },
  'gemini-2.5-flash': {
    id: 'gemini-2.5-flash',
    name: 'Gemini 2.5 Flash',
    maxTokens: 65536,
    contextWindow: 1000000,
  },
  'gemini-2.5-flash-lite': {
    id: 'gemini-2.5-flash-lite',
    name: 'Gemini 2.5 Flash Lite',
    maxTokens: 65536,
    contextWindow: 1000000,
  },
  'gemini-2.5-flash-image': {
    id: 'gemini-2.5-flash-image',
    name: 'Gemini 2.5 Flash Image',
    maxTokens: 65536,
    contextWindow: 1000000,
  },
  // Gemini 2.0 models (deprecated March 2026)
  'gemini-2.0-flash': {
    id: 'gemini-2.0-flash',
    name: 'Gemini 2.0 Flash',
    maxTokens: 8192,
    contextWindow: 1000000,
  },
  'gemini-2.0-flash-lite': {
    id: 'gemini-2.0-flash-lite',
    name: 'Gemini 2.0 Flash Lite',
    maxTokens: 8192,
    contextWindow: 1000000,
  },
  // Gemini 1.5 models
  'gemini-1.5-pro': {
    id: 'gemini-1.5-pro',
    name: 'Gemini 1.5 Pro',
    maxTokens: 8192,
    contextWindow: 2000000,
  },
  'gemini-1.5-flash': {
    id: 'gemini-1.5-flash',
    name: 'Gemini 1.5 Flash',
    maxTokens: 8192,
    contextWindow: 1000000,
  },
  'gemini-1.5-flash-8b': {
    id: 'gemini-1.5-flash-8b',
    name: 'Gemini 1.5 Flash 8B',
    maxTokens: 8192,
    contextWindow: 1000000,
  },
};
// ============================================================================
// Provider Implementation
// ============================================================================
export class GoogleAIProvider implements LLMProvider {
private readonly config: Required<GoogleAIConfig>;
private readonly baseUrl: string;
private readonly modelId: string;
constructor(config: GoogleAIConfig) {
this.config = {
apiKey: config.apiKey || process.env.GOOGLE_AI_API_KEY || process.env.GEMINI_API_KEY || '',
projectId: config.projectId || process.env.GOOGLE_CLOUD_PROJECT || '',
location: config.location || 'us-central1',
model: config.model || 'gemini-2.0-flash',
maxRetries: config.maxRetries ?? 3,
timeout: config.timeout ?? 60000,
};
this.modelId = this.config.model;
this.baseUrl = 'https://generativelanguage.googleapis.com/v1beta';
}
async complete(messages: Message[], options?: CompletionOptions): Promise<Completion> {
const geminiMessages = this.convertMessages(messages);
const systemInstruction = this.extractSystemInstruction(messages);
const requestBody: Record<string, unknown> = {
contents: geminiMessages,
generationConfig: {
temperature: options?.temperature ?? 0.7,
maxOutputTokens: options?.maxTokens ?? 4096,
topP: options?.topP ?? 0.95,
},
};
if (systemInstruction) {
requestBody.systemInstruction = {
parts: [{ text: systemInstruction }],
};
}
if (options?.tools && options.tools.length > 0) {
requestBody.tools = [{
functionDeclarations: options.tools.map(tool => ({
name: tool.name,
description: tool.description,
parameters: tool.parameters,
})),
}];
}
const url = `${this.baseUrl}/models/${this.modelId}:generateContent?key=${this.config.apiKey}`;
let lastError: Error | null = null;
for (let attempt = 0; attempt < this.config.maxRetries; attempt++) {
try {
const response = await fetch(url, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody),
signal: AbortSignal.timeout(this.config.timeout),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Google AI API error (${response.status}): ${errorText}`);
}
const data = await response.json() as GeminiResponse;
if (!data.candidates || data.candidates.length === 0) {
throw new Error('No response from Google AI');
}
const candidate = data.candidates[0];
const content = candidate.content.parts.map(p => p.text).join('');
const finishReason = this.mapFinishReason(candidate.finishReason);
return {
content,
finishReason,
usage: {
inputTokens: data.usageMetadata?.promptTokenCount ?? 0,
outputTokens: data.usageMetadata?.candidatesTokenCount ?? 0,
},
};
} catch (error) {
lastError = error as Error;
if (attempt < this.config.maxRetries - 1) {
await new Promise(resolve => setTimeout(resolve, 1000 * (attempt + 1)));
}
}
}
throw lastError || new Error('Failed to complete request');
}
async *stream(messages: Message[], options?: StreamOptions): AsyncGenerator<Token, Completion, void> {
const geminiMessages = this.convertMessages(messages);
const systemInstruction = this.extractSystemInstruction(messages);
const requestBody: Record<string, unknown> = {
contents: geminiMessages,
generationConfig: {
temperature: options?.temperature ?? 0.7,
maxOutputTokens: options?.maxTokens ?? 4096,
},
};
if (systemInstruction) {
requestBody.systemInstruction = {
parts: [{ text: systemInstruction }],
};
}
const url = `${this.baseUrl}/models/${this.modelId}:streamGenerateContent?key=${this.config.apiKey}&alt=sse`;
const response = await fetch(url, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Google AI API error (${response.status}): ${errorText}`);
}
const reader = response.body?.getReader();
if (!reader) {
throw new Error('No response body');
}
const decoder = new TextDecoder();
let buffer = '';
let fullContent = '';
let inputTokens = 0;
let outputTokens = 0;
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') continue;
try {
const json = JSON.parse(data) as GeminiResponse;
if (json.candidates?.[0]?.content?.parts?.[0]?.text) {
const text = json.candidates[0].content.parts[0].text;
fullContent += text;
options?.onToken?.(text);
yield { type: 'text', text };
}
if (json.usageMetadata) {
inputTokens = json.usageMetadata.promptTokenCount;
outputTokens = json.usageMetadata.candidatesTokenCount;
}
} catch {
// Skip invalid JSON
}
}
}
}
} finally {
reader.releaseLock();
}
return {
content: fullContent,
finishReason: 'stop',
usage: { inputTokens, outputTokens },
};
}
async countTokens(text: string): Promise<number> {
// Approximate token count (Gemini uses ~4 chars per token on average)
return Math.ceil(text.length / 4);
}
getModel(): ModelInfo {
return MODEL_INFO[this.modelId] ?? {
id: this.modelId,
name: this.modelId,
maxTokens: 8192,
contextWindow: 1000000,
};
}
async isHealthy(): Promise<boolean> {
try {
await this.countTokens('health check');
return !!this.config.apiKey;
} catch {
return false;
}
}
private convertMessages(messages: Message[]): GeminiMessage[] {
return messages
.filter(m => m.role !== 'system')
.map(m => ({
role: m.role === 'assistant' ? 'model' : 'user',
parts: [{ text: m.content }],
}));
}
private extractSystemInstruction(messages: Message[]): string | null {
const systemMessage = messages.find(m => m.role === 'system');
return systemMessage?.content || null;
}
private mapFinishReason(reason: string): 'stop' | 'length' | 'tool_use' {
switch (reason) {
case 'STOP':
case 'END_TURN':
return 'stop';
case 'MAX_TOKENS':
return 'length';
case 'TOOL_CALL':
return 'tool_use';
default:
return 'stop';
}
}
}
// ============================================================================
// Factory Function
// ============================================================================
/**
 * Factory for a GoogleAIProvider bound to the given configuration.
 *
 * @param config - API key / model selection; unset fields fall back to
 *   environment variables and provider defaults.
 * @returns The provider, exposed through the generic LLMProvider interface.
 */
export function createGoogleAIProvider(config: GoogleAIConfig): LLMProvider {
  const provider = new GoogleAIProvider(config);
  return provider;
}

View File

@@ -0,0 +1,62 @@
/**
* OpenRouterProvider - OpenRouter Multi-Model LLM Integration
*
* Provides access to multiple LLM providers through OpenRouter,
* including Qwen QwQ reasoning models, Claude, GPT-4, and more.
*/
import type { LLMProvider, Message, CompletionOptions, StreamOptions, Completion, Token, ModelInfo } from './index.js';
// NOTE(review): generated declaration file (see the sourceMappingURL below
// and OpenRouterProvider.d.ts.map); prefer regenerating from
// OpenRouterProvider.ts over hand-editing.
export interface OpenRouterConfig {
    apiKey: string;
    baseUrl?: string;
    model?: string;
    siteUrl?: string;
    siteName?: string;
    maxRetries?: number;
    timeout?: number;
}
/**
 * Well-known OpenRouter model ids; the trailing `| string` keeps the type
 * open for models not listed here.
 */
export type OpenRouterModel = 'qwen/qwq-32b' | 'qwen/qwq-32b:free' | 'qwen/qwq-32b-preview' | 'qwen/qwen3-max' | 'qwen/qwen-2.5-72b-instruct' | 'qwen/qwen-2.5-coder-32b-instruct' | 'anthropic/claude-3.5-sonnet' | 'anthropic/claude-3-opus' | 'anthropic/claude-3-haiku' | 'openai/gpt-4-turbo' | 'openai/gpt-4o' | 'openai/o1-preview' | 'openai/o1-mini' | 'google/gemini-2.5-pro-preview-05-06' | 'google/gemini-2.0-flash-001' | 'google/gemini-2.0-flash-lite-001' | 'google/gemini-2.0-flash-thinking-exp:free' | 'google/gemini-pro-1.5' | 'google/gemini-flash-1.5' | 'meta-llama/llama-3.1-405b-instruct' | 'meta-llama/llama-3.1-70b-instruct' | 'deepseek/deepseek-r1' | 'deepseek/deepseek-chat' | string;
export declare class OpenRouterProvider implements LLMProvider {
    private readonly config;
    private readonly model;
    constructor(config: OpenRouterConfig);
    /**
     * Complete a conversation
     */
    complete(messages: Message[], options?: CompletionOptions): Promise<Completion>;
    /**
     * Stream a conversation
     */
    stream(messages: Message[], options?: StreamOptions): AsyncGenerator<Token, Completion, void>;
    /**
     * Count tokens in text
     */
    countTokens(text: string): Promise<number>;
    /**
     * Get model info
     */
    getModel(): ModelInfo;
    /**
     * Check provider health
     */
    isHealthy(): Promise<boolean>;
    /**
     * List available models
     */
    listModels(): Promise<string[]>;
    private convertMessages;
    private convertTools;
    private convertResponse;
    private makeRequest;
    private makeStreamRequest;
}
/** Factory: OpenRouter provider with arbitrary configuration. */
export declare function createOpenRouterProvider(config: OpenRouterConfig): OpenRouterProvider;
/**
 * Create a provider for Qwen QwQ reasoning model
 */
export declare function createQwQProvider(apiKey: string, free?: boolean): OpenRouterProvider;
/**
 * Create a provider for DeepSeek R1 reasoning model
 */
export declare function createDeepSeekR1Provider(apiKey: string): OpenRouterProvider;
export default OpenRouterProvider;
//# sourceMappingURL=OpenRouterProvider.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"OpenRouterProvider.d.ts","sourceRoot":"","sources":["OpenRouterProvider.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,KAAK,EACV,WAAW,EACX,OAAO,EACP,iBAAiB,EACjB,aAAa,EACb,UAAU,EACV,KAAK,EACL,SAAS,EAGV,MAAM,YAAY,CAAC;AAMpB,MAAM,WAAW,gBAAgB;IAC/B,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,OAAO,CAAC,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,eAAe,GAEvB,cAAc,GACd,mBAAmB,GACnB,sBAAsB,GAEtB,gBAAgB,GAChB,4BAA4B,GAC5B,kCAAkC,GAElC,6BAA6B,GAC7B,yBAAyB,GACzB,0BAA0B,GAE1B,oBAAoB,GACpB,eAAe,GACf,mBAAmB,GACnB,gBAAgB,GAEhB,qCAAqC,GACrC,6BAA6B,GAC7B,kCAAkC,GAClC,2CAA2C,GAC3C,uBAAuB,GACvB,yBAAyB,GAEzB,oCAAoC,GACpC,mCAAmC,GAEnC,sBAAsB,GACtB,wBAAwB,GAExB,MAAM,CAAC;AAyMX,qBAAa,kBAAmB,YAAW,WAAW;IACpD,OAAO,CAAC,QAAQ,CAAC,MAAM,CAA6B;IACpD,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAkB;gBAE5B,MAAM,EAAE,gBAAgB;IAapC;;OAEG;IACG,QAAQ,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,iBAAiB,GAAG,OAAO,CAAC,UAAU,CAAC;IAerF;;OAEG;IACI,MAAM,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,aAAa,GAAG,cAAc,CAAC,KAAK,EAAE,UAAU,EAAE,IAAI,CAAC;IA8EpG;;OAEG;IACG,WAAW,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAKhD;;OAEG;IACH,QAAQ,IAAI,SAAS;IASrB;;OAEG;IACG,SAAS,IAAI,OAAO,CAAC,OAAO,CAAC;IAanC;;OAEG;IACG,UAAU,IAAI,OAAO,CAAC,MAAM,EAAE,CAAC;IAqBrC,OAAO,CAAC,eAAe;IAOvB,OAAO,CAAC,YAAY;IAWpB,OAAO,CAAC,eAAe;YAwBT,WAAW;YA8BV,iBAAiB;CA8DjC;AAMD,wBAAgB,wBAAwB,CAAC,MAAM,EAAE,gBAAgB,GAAG,kBAAkB,CAErF;AAED;;GAEG;AACH,wBAAgB,iBAAiB,CAAC,MAAM,EAAE,MAAM,EAAE,IAAI,GAAE,OAAe,GAAG,kBAAkB,CAK3F;AAED;;GAEG;AACH,wBAAgB,wBAAwB,CAAC,MAAM,EAAE,MAAM,GAAG,kBAAkB,CAK3E;AAED,eAAe,kBAAkB,CAAC"}

View File

@@ -0,0 +1,450 @@
"use strict";
/**
* OpenRouterProvider - OpenRouter Multi-Model LLM Integration
*
* Provides access to multiple LLM providers through OpenRouter,
* including Qwen QwQ reasoning models, Claude, GPT-4, and more.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenRouterProvider = void 0;
exports.createOpenRouterProvider = createOpenRouterProvider;
exports.createQwQProvider = createQwQProvider;
exports.createDeepSeekR1Provider = createDeepSeekR1Provider;
// ============================================================================
// Model Info Registry
// ============================================================================
/**
 * Static capability registry (max output tokens / context window) for
 * well-known OpenRouter model ids; getModel() falls back to defaults for
 * ids not listed here, and listModels() uses these keys when the remote
 * model listing is unavailable.
 */
const MODEL_INFO = {
    // QwQ Reasoning Models
    'qwen/qwq-32b': {
        id: 'qwen/qwq-32b',
        name: 'Qwen QwQ 32B (Reasoning)',
        maxTokens: 16384,
        contextWindow: 32768,
    },
    'qwen/qwq-32b:free': {
        id: 'qwen/qwq-32b:free',
        name: 'Qwen QwQ 32B Free (Reasoning)',
        maxTokens: 16384,
        contextWindow: 32768,
    },
    'qwen/qwq-32b-preview': {
        id: 'qwen/qwq-32b-preview',
        name: 'Qwen QwQ 32B Preview (Reasoning)',
        maxTokens: 16384,
        contextWindow: 32768,
    },
    // Qwen Standard Models
    'qwen/qwen3-max': {
        id: 'qwen/qwen3-max',
        name: 'Qwen3 Max',
        maxTokens: 8192,
        contextWindow: 32768,
    },
    'qwen/qwen-2.5-72b-instruct': {
        id: 'qwen/qwen-2.5-72b-instruct',
        name: 'Qwen 2.5 72B Instruct',
        maxTokens: 8192,
        contextWindow: 32768,
    },
    'qwen/qwen-2.5-coder-32b-instruct': {
        id: 'qwen/qwen-2.5-coder-32b-instruct',
        name: 'Qwen 2.5 Coder 32B',
        maxTokens: 8192,
        contextWindow: 32768,
    },
    // Anthropic
    'anthropic/claude-3.5-sonnet': {
        id: 'anthropic/claude-3.5-sonnet',
        name: 'Claude 3.5 Sonnet',
        maxTokens: 8192,
        contextWindow: 200000,
    },
    'anthropic/claude-3-opus': {
        id: 'anthropic/claude-3-opus',
        name: 'Claude 3 Opus',
        maxTokens: 4096,
        contextWindow: 200000,
    },
    // OpenAI
    'openai/gpt-4o': {
        id: 'openai/gpt-4o',
        name: 'GPT-4o',
        maxTokens: 16384,
        contextWindow: 128000,
    },
    'openai/o1-preview': {
        id: 'openai/o1-preview',
        name: 'O1 Preview (Reasoning)',
        maxTokens: 32768,
        contextWindow: 128000,
    },
    'openai/o1-mini': {
        id: 'openai/o1-mini',
        name: 'O1 Mini (Reasoning)',
        maxTokens: 65536,
        contextWindow: 128000,
    },
    // DeepSeek
    'deepseek/deepseek-r1': {
        id: 'deepseek/deepseek-r1',
        name: 'DeepSeek R1 (Reasoning)',
        maxTokens: 8192,
        contextWindow: 64000,
    },
    'deepseek/deepseek-chat': {
        id: 'deepseek/deepseek-chat',
        name: 'DeepSeek Chat',
        maxTokens: 4096,
        contextWindow: 32000,
    },
    // Google Gemini 2.x
    'google/gemini-2.5-pro-preview-05-06': {
        id: 'google/gemini-2.5-pro-preview-05-06',
        name: 'Gemini 2.5 Pro Preview',
        maxTokens: 65536,
        contextWindow: 1000000,
    },
    'google/gemini-2.0-flash-001': {
        id: 'google/gemini-2.0-flash-001',
        name: 'Gemini 2.0 Flash',
        maxTokens: 8192,
        contextWindow: 1000000,
    },
    'google/gemini-2.0-flash-lite-001': {
        id: 'google/gemini-2.0-flash-lite-001',
        name: 'Gemini 2.0 Flash Lite',
        maxTokens: 8192,
        contextWindow: 1000000,
    },
    'google/gemini-2.0-flash-thinking-exp:free': {
        id: 'google/gemini-2.0-flash-thinking-exp:free',
        name: 'Gemini 2.0 Flash Thinking (Free)',
        maxTokens: 32768,
        contextWindow: 1000000,
    },
    // Google Gemini 1.5
    'google/gemini-pro-1.5': {
        id: 'google/gemini-pro-1.5',
        name: 'Gemini Pro 1.5',
        maxTokens: 8192,
        contextWindow: 1000000,
    },
    // Meta
    'meta-llama/llama-3.1-405b-instruct': {
        id: 'meta-llama/llama-3.1-405b-instruct',
        name: 'Llama 3.1 405B Instruct',
        maxTokens: 4096,
        contextWindow: 128000,
    },
};
// ============================================================================
// OpenRouterProvider Implementation
// ============================================================================
/**
 * LLMProvider backed by the OpenRouter multi-model gateway
 * (OpenAI-compatible chat-completions wire format).
 * NOTE(review): this is tsc output (see sourceMappingURL below); prefer
 * changing OpenRouterProvider.ts and regenerating over hand-editing.
 */
class OpenRouterProvider {
    constructor(config) {
        this.config = {
            apiKey: config.apiKey,
            baseUrl: config.baseUrl ?? 'https://openrouter.ai/api',
            model: config.model ?? 'qwen/qwq-32b', // Default to QwQ reasoning model
            siteUrl: config.siteUrl ?? '',
            siteName: config.siteName ?? 'RuvBot',
            maxRetries: config.maxRetries ?? 3,
            timeout: config.timeout ?? 120000, // Longer timeout for reasoning models
        };
        this.model = this.config.model;
    }
    /**
     * Complete a conversation (non-streaming).
     * NOTE(review): `maxRetries` is configured but no retry loop exists in
     * this class — confirm whether retries are handled elsewhere.
     */
    async complete(messages, options) {
        const modelInfo = this.getModel();
        const response = await this.makeRequest('/v1/chat/completions', {
            model: this.model,
            max_tokens: options?.maxTokens ?? modelInfo.maxTokens,
            temperature: options?.temperature ?? 1.0,
            top_p: options?.topP,
            stop: options?.stopSequences,
            messages: this.convertMessages(messages),
            tools: options?.tools ? this.convertTools(options.tools) : undefined,
        });
        return this.convertResponse(response);
    }
    /**
     * Stream a conversation: yields text tokens (and finalized tool calls)
     * as SSE chunks arrive, then returns the aggregated completion.
     */
    async *stream(messages, options) {
        const modelInfo = this.getModel();
        const response = await this.makeStreamRequest('/v1/chat/completions', {
            model: this.model,
            max_tokens: options?.maxTokens ?? modelInfo.maxTokens,
            temperature: options?.temperature ?? 1.0,
            top_p: options?.topP,
            stop: options?.stopSequences,
            messages: this.convertMessages(messages),
            tools: options?.tools ? this.convertTools(options.tools) : undefined,
            stream: true,
        });
        let fullContent = '';
        let inputTokens = 0;
        let outputTokens = 0;
        const toolCalls = [];
        let finishReason = 'stop';
        // Tool-call fragments arrive incrementally per index; accumulate the
        // argument string until the stream ends, then parse.
        const pendingToolCalls = new Map();
        for await (const chunk of response) {
            const choice = chunk.choices[0];
            if (!choice)
                continue;
            // Handle content delta
            if (choice.delta.content) {
                fullContent += choice.delta.content;
                options?.onToken?.(choice.delta.content);
                yield { type: 'text', text: choice.delta.content };
            }
            // Handle tool calls
            if (choice.delta.tool_calls) {
                for (const tc of choice.delta.tool_calls) {
                    if (!pendingToolCalls.has(tc.index)) {
                        pendingToolCalls.set(tc.index, { id: tc.id ?? '', name: '', arguments: '' });
                    }
                    const pending = pendingToolCalls.get(tc.index);
                    if (tc.id)
                        pending.id = tc.id;
                    if (tc.function?.name)
                        pending.name = tc.function.name;
                    if (tc.function?.arguments)
                        pending.arguments += tc.function.arguments;
                }
            }
            // Handle finish reason
            if (choice.finish_reason) {
                if (choice.finish_reason === 'tool_calls')
                    finishReason = 'tool_use';
                else if (choice.finish_reason === 'length')
                    finishReason = 'length';
            }
            // Handle usage (only present on chunks that carry a usage object)
            if (chunk.usage) {
                inputTokens = chunk.usage.prompt_tokens;
                outputTokens = chunk.usage.completion_tokens;
            }
        }
        // Finalize tool calls
        for (const pending of pendingToolCalls.values()) {
            if (pending.id && pending.name) {
                try {
                    const input = JSON.parse(pending.arguments || '{}');
                    toolCalls.push({ id: pending.id, name: pending.name, input });
                    yield { type: 'tool_use', toolUse: { id: pending.id, name: pending.name, input } };
                }
                catch {
                    // Skip invalid JSON
                }
            }
        }
        return {
            content: fullContent,
            finishReason,
            usage: { inputTokens, outputTokens },
            toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
        };
    }
    /**
     * Approximate the token count of `text` locally — no API call is made.
     */
    async countTokens(text) {
        // Approximate token count (~4 chars per token)
        return Math.ceil(text.length / 4);
    }
    /**
     * Capability metadata for the configured model; unknown ids fall back
     * to conservative defaults.
     */
    getModel() {
        return MODEL_INFO[this.model] ?? {
            id: this.model,
            name: this.model,
            maxTokens: 4096,
            contextWindow: 32000,
        };
    }
    /**
     * Check provider health by hitting the models endpoint with the
     * configured API key.
     */
    async isHealthy() {
        try {
            const response = await fetch(`${this.config.baseUrl}/v1/models`, {
                headers: {
                    'Authorization': `Bearer ${this.config.apiKey}`,
                },
            });
            return response.ok;
        }
        catch {
            return false;
        }
    }
    /**
     * List available model ids from the API; falls back to the static
     * MODEL_INFO registry keys on any failure.
     */
    async listModels() {
        try {
            const response = await fetch(`${this.config.baseUrl}/v1/models`, {
                headers: {
                    'Authorization': `Bearer ${this.config.apiKey}`,
                },
            });
            if (!response.ok)
                return Object.keys(MODEL_INFO);
            const data = await response.json();
            return data.data.map(m => m.id);
        }
        catch {
            return Object.keys(MODEL_INFO);
        }
    }
    // ==========================================================================
    // Private Methods
    // ==========================================================================
    // Pass messages through in OpenAI-compatible {role, content} shape.
    convertMessages(messages) {
        return messages.map(msg => ({
            role: msg.role,
            content: msg.content,
        }));
    }
    // Wrap tool definitions in OpenAI-compatible "function" tool format.
    convertTools(tools) {
        return tools.map(tool => ({
            type: 'function',
            function: {
                name: tool.name,
                description: tool.description,
                parameters: tool.parameters,
            },
        }));
    }
    // Map a non-streaming API response onto the provider-neutral Completion.
    convertResponse(response) {
        const choice = response.choices[0];
        // NOTE(review): JSON.parse throws here on malformed tool-call
        // arguments; the streaming path tolerates that case — confirm
        // whether complete() should too.
        const toolCalls = (choice.message.tool_calls ?? []).map(tc => ({
            id: tc.id,
            name: tc.function.name,
            input: JSON.parse(tc.function.arguments || '{}'),
        }));
        let finishReason = 'stop';
        if (choice.finish_reason === 'length')
            finishReason = 'length';
        if (choice.finish_reason === 'tool_calls')
            finishReason = 'tool_use';
        return {
            content: choice.message.content ?? '',
            finishReason,
            usage: {
                inputTokens: response.usage.prompt_tokens,
                outputTokens: response.usage.completion_tokens,
            },
            toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
        };
    }
    // POST a JSON body; HTTP-Referer / X-Title identify the app to
    // OpenRouter. Enforces the configured timeout.
    async makeRequest(endpoint, body) {
        const url = `${this.config.baseUrl}${endpoint}`;
        const headers = {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${this.config.apiKey}`,
        };
        if (this.config.siteUrl) {
            headers['HTTP-Referer'] = this.config.siteUrl;
        }
        if (this.config.siteName) {
            headers['X-Title'] = this.config.siteName;
        }
        const response = await fetch(url, {
            method: 'POST',
            headers,
            body: JSON.stringify(body),
            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`OpenRouter API error: ${response.status} - ${error}`);
        }
        return response.json();
    }
    // POST and yield parsed SSE JSON chunks until '[DONE]'.
    // NOTE(review): unlike makeRequest, no timeout signal is applied here —
    // a fixed deadline would cut off long-running streams.
    async *makeStreamRequest(endpoint, body) {
        const url = `${this.config.baseUrl}${endpoint}`;
        const headers = {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${this.config.apiKey}`,
        };
        if (this.config.siteUrl) {
            headers['HTTP-Referer'] = this.config.siteUrl;
        }
        if (this.config.siteName) {
            headers['X-Title'] = this.config.siteName;
        }
        const response = await fetch(url, {
            method: 'POST',
            headers,
            body: JSON.stringify(body),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`OpenRouter API error: ${response.status} - ${error}`);
        }
        const reader = response.body?.getReader();
        if (!reader) {
            throw new Error('No response body');
        }
        const decoder = new TextDecoder();
        let buffer = '';
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                // SSE frames are newline-delimited; keep the trailing partial
                // line buffered until the next chunk completes it.
                const lines = buffer.split('\n');
                buffer = lines.pop() ?? '';
                for (const line of lines) {
                    if (line.startsWith('data: ')) {
                        const data = line.slice(6);
                        if (data === '[DONE]')
                            return;
                        try {
                            yield JSON.parse(data);
                        }
                        catch {
                            // Skip invalid JSON
                        }
                    }
                }
            }
        }
        finally {
            reader.releaseLock();
        }
    }
}
exports.OpenRouterProvider = OpenRouterProvider;
// ============================================================================
// Factory Functions
// ============================================================================
/**
 * Factory: OpenRouter provider with arbitrary configuration.
 */
function createOpenRouterProvider(config) {
    return new OpenRouterProvider(config);
}
/**
 * Create a provider for Qwen QwQ reasoning model.
 * `free` selects the no-cost 'qwen/qwq-32b:free' variant.
 */
function createQwQProvider(apiKey, free = false) {
    return new OpenRouterProvider({
        apiKey,
        model: free ? 'qwen/qwq-32b:free' : 'qwen/qwq-32b',
    });
}
/**
 * Create a provider for DeepSeek R1 reasoning model
 */
function createDeepSeekR1Provider(apiKey) {
    return new OpenRouterProvider({
        apiKey,
        model: 'deepseek/deepseek-r1',
    });
}
exports.default = OpenRouterProvider;
//# sourceMappingURL=OpenRouterProvider.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,607 @@
/**
* OpenRouterProvider - OpenRouter Multi-Model LLM Integration
*
* Provides access to multiple LLM providers through OpenRouter,
* including Qwen QwQ reasoning models, Claude, GPT-4, and more.
*/
import type {
LLMProvider,
Message,
CompletionOptions,
StreamOptions,
Completion,
Token,
ModelInfo,
Tool,
ToolCall,
} from './index.js';
// ============================================================================
// Types
// ============================================================================
/** Configuration for OpenRouterProvider. */
export interface OpenRouterConfig {
  // OpenRouter API key (required).
  apiKey: string;
  // Default 'https://openrouter.ai/api'.
  baseUrl?: string;
  // Default 'qwen/qwq-32b'.
  model?: string;
  // Sent as the HTTP-Referer header for OpenRouter app attribution.
  siteUrl?: string;
  // Sent as the X-Title header; default 'RuvBot'.
  siteName?: string;
  // Default 3. NOTE(review): stored in config but not referenced by the
  // visible request paths — confirm retries are intended/implemented.
  maxRetries?: number;
  // Request timeout in ms, default 120000 (reasoning models are slow);
  // applied to non-streaming requests only.
  timeout?: number;
}
/** Well-known OpenRouter model ids; open-ended via the trailing `| string`. */
export type OpenRouterModel =
  // Qwen Reasoning Models (QwQ)
  | 'qwen/qwq-32b'
  | 'qwen/qwq-32b:free'
  | 'qwen/qwq-32b-preview'
  // Qwen Models
  | 'qwen/qwen3-max'
  | 'qwen/qwen-2.5-72b-instruct'
  | 'qwen/qwen-2.5-coder-32b-instruct'
  // Anthropic via OpenRouter
  | 'anthropic/claude-3.5-sonnet'
  | 'anthropic/claude-3-opus'
  | 'anthropic/claude-3-haiku'
  // OpenAI via OpenRouter
  | 'openai/gpt-4-turbo'
  | 'openai/gpt-4o'
  | 'openai/o1-preview'
  | 'openai/o1-mini'
  // Google Gemini 2.x via OpenRouter
  | 'google/gemini-2.5-pro-preview-05-06'
  | 'google/gemini-2.0-flash-001'
  | 'google/gemini-2.0-flash-lite-001'
  | 'google/gemini-2.0-flash-thinking-exp:free'
  | 'google/gemini-pro-1.5'
  | 'google/gemini-flash-1.5'
  // Meta via OpenRouter
  | 'meta-llama/llama-3.1-405b-instruct'
  | 'meta-llama/llama-3.1-70b-instruct'
  // DeepSeek
  | 'deepseek/deepseek-r1'
  | 'deepseek/deepseek-chat'
  // Other models
  | string;
// Chat message in OpenAI-compatible wire format.
interface OpenRouterMessage {
  role: 'user' | 'assistant' | 'system';
  content: string;
}
// Tool definition in OpenAI-compatible "function" format.
interface OpenRouterTool {
  type: 'function';
  function: {
    name: string;
    description: string;
    parameters: Record<string, unknown>;
  };
}
// Non-streaming chat-completion response body.
interface OpenRouterResponse {
  id: string;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: 'assistant';
      content: string | null;
      tool_calls?: Array<{
        id: string;
        type: 'function';
        function: {
          name: string;
          arguments: string;
        };
      }>;
    };
    finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter';
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
// One SSE chunk of a streaming chat completion (delta-based).
interface OpenRouterStreamChunk {
  id: string;
  model: string;
  choices: Array<{
    index: number;
    delta: {
      role?: string;
      content?: string;
      tool_calls?: Array<{
        index: number;
        id?: string;
        type?: 'function';
        function?: {
          name?: string;
          arguments?: string;
        };
      }>;
    };
    finish_reason: string | null;
  }>;
  // Optional; only some chunks carry token accounting.
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
  };
}
// ============================================================================
// Model Info Registry
// ============================================================================
/**
 * Static capability registry (max output tokens / context window) for
 * well-known OpenRouter model ids; getModel() falls back to defaults for
 * ids not listed here, and listModels() uses these keys when the remote
 * model listing is unavailable.
 */
const MODEL_INFO: Record<string, ModelInfo> = {
  // QwQ Reasoning Models
  'qwen/qwq-32b': {
    id: 'qwen/qwq-32b',
    name: 'Qwen QwQ 32B (Reasoning)',
    maxTokens: 16384,
    contextWindow: 32768,
  },
  'qwen/qwq-32b:free': {
    id: 'qwen/qwq-32b:free',
    name: 'Qwen QwQ 32B Free (Reasoning)',
    maxTokens: 16384,
    contextWindow: 32768,
  },
  'qwen/qwq-32b-preview': {
    id: 'qwen/qwq-32b-preview',
    name: 'Qwen QwQ 32B Preview (Reasoning)',
    maxTokens: 16384,
    contextWindow: 32768,
  },
  // Qwen Standard Models
  'qwen/qwen3-max': {
    id: 'qwen/qwen3-max',
    name: 'Qwen3 Max',
    maxTokens: 8192,
    contextWindow: 32768,
  },
  'qwen/qwen-2.5-72b-instruct': {
    id: 'qwen/qwen-2.5-72b-instruct',
    name: 'Qwen 2.5 72B Instruct',
    maxTokens: 8192,
    contextWindow: 32768,
  },
  'qwen/qwen-2.5-coder-32b-instruct': {
    id: 'qwen/qwen-2.5-coder-32b-instruct',
    name: 'Qwen 2.5 Coder 32B',
    maxTokens: 8192,
    contextWindow: 32768,
  },
  // Anthropic
  'anthropic/claude-3.5-sonnet': {
    id: 'anthropic/claude-3.5-sonnet',
    name: 'Claude 3.5 Sonnet',
    maxTokens: 8192,
    contextWindow: 200000,
  },
  'anthropic/claude-3-opus': {
    id: 'anthropic/claude-3-opus',
    name: 'Claude 3 Opus',
    maxTokens: 4096,
    contextWindow: 200000,
  },
  // OpenAI
  'openai/gpt-4o': {
    id: 'openai/gpt-4o',
    name: 'GPT-4o',
    maxTokens: 16384,
    contextWindow: 128000,
  },
  'openai/o1-preview': {
    id: 'openai/o1-preview',
    name: 'O1 Preview (Reasoning)',
    maxTokens: 32768,
    contextWindow: 128000,
  },
  'openai/o1-mini': {
    id: 'openai/o1-mini',
    name: 'O1 Mini (Reasoning)',
    maxTokens: 65536,
    contextWindow: 128000,
  },
  // DeepSeek
  'deepseek/deepseek-r1': {
    id: 'deepseek/deepseek-r1',
    name: 'DeepSeek R1 (Reasoning)',
    maxTokens: 8192,
    contextWindow: 64000,
  },
  'deepseek/deepseek-chat': {
    id: 'deepseek/deepseek-chat',
    name: 'DeepSeek Chat',
    maxTokens: 4096,
    contextWindow: 32000,
  },
  // Google Gemini 2.x
  'google/gemini-2.5-pro-preview-05-06': {
    id: 'google/gemini-2.5-pro-preview-05-06',
    name: 'Gemini 2.5 Pro Preview',
    maxTokens: 65536,
    contextWindow: 1000000,
  },
  'google/gemini-2.0-flash-001': {
    id: 'google/gemini-2.0-flash-001',
    name: 'Gemini 2.0 Flash',
    maxTokens: 8192,
    contextWindow: 1000000,
  },
  'google/gemini-2.0-flash-lite-001': {
    id: 'google/gemini-2.0-flash-lite-001',
    name: 'Gemini 2.0 Flash Lite',
    maxTokens: 8192,
    contextWindow: 1000000,
  },
  'google/gemini-2.0-flash-thinking-exp:free': {
    id: 'google/gemini-2.0-flash-thinking-exp:free',
    name: 'Gemini 2.0 Flash Thinking (Free)',
    maxTokens: 32768,
    contextWindow: 1000000,
  },
  // Google Gemini 1.5
  'google/gemini-pro-1.5': {
    id: 'google/gemini-pro-1.5',
    name: 'Gemini Pro 1.5',
    maxTokens: 8192,
    contextWindow: 1000000,
  },
  // Meta
  'meta-llama/llama-3.1-405b-instruct': {
    id: 'meta-llama/llama-3.1-405b-instruct',
    name: 'Llama 3.1 405B Instruct',
    maxTokens: 4096,
    contextWindow: 128000,
  },
};
// ============================================================================
// OpenRouterProvider Implementation
// ============================================================================
/**
 * LLMProvider backed by the OpenRouter API (OpenAI-compatible chat protocol).
 *
 * Defaults to the Qwen QwQ reasoning model and supports non-streaming and
 * streaming completions, tool calling, approximate token counting, health
 * checks, and model listing.
 */
export class OpenRouterProvider implements LLMProvider {
  private readonly config: Required<OpenRouterConfig>;
  private readonly model: OpenRouterModel;

  constructor(config: OpenRouterConfig) {
    // NOTE(review): maxRetries is stored but not used by any request path in
    // this class — confirm whether retry support is planned or the option
    // should be removed.
    this.config = {
      apiKey: config.apiKey,
      baseUrl: config.baseUrl ?? 'https://openrouter.ai/api',
      model: config.model ?? 'qwen/qwq-32b', // Default to QwQ reasoning model
      siteUrl: config.siteUrl ?? '',
      siteName: config.siteName ?? 'RuvBot',
      maxRetries: config.maxRetries ?? 3,
      timeout: config.timeout ?? 120000, // Longer timeout for reasoning models
    };
    this.model = this.config.model;
  }

  /**
   * Complete a conversation with a single non-streaming request.
   *
   * @param messages - Conversation history, oldest first.
   * @param options - Sampling parameters, stop sequences, and tool definitions.
   * @returns The assistant completion, including token usage and tool calls.
   * @throws Error when the API responds with a non-2xx status.
   */
  async complete(messages: Message[], options?: CompletionOptions): Promise<Completion> {
    const modelInfo = this.getModel();
    const response = await this.makeRequest<OpenRouterResponse>('/v1/chat/completions', {
      model: this.model,
      max_tokens: options?.maxTokens ?? modelInfo.maxTokens,
      temperature: options?.temperature ?? 1.0,
      top_p: options?.topP,
      stop: options?.stopSequences,
      messages: this.convertMessages(messages),
      tools: options?.tools ? this.convertTools(options.tools) : undefined,
    });
    return this.convertResponse(response);
  }

  /**
   * Stream a conversation as server-sent events.
   *
   * Yields text tokens as they arrive; tool calls are accumulated across
   * chunks and yielded once complete, after the stream ends. The generator's
   * return value is the final Completion.
   *
   * @param messages - Conversation history, oldest first.
   * @param options - Sampling parameters plus an optional onToken callback.
   * @throws Error when the API responds with a non-2xx status or has no body.
   */
  async *stream(messages: Message[], options?: StreamOptions): AsyncGenerator<Token, Completion, void> {
    const modelInfo = this.getModel();
    const response = this.makeStreamRequest('/v1/chat/completions', {
      model: this.model,
      max_tokens: options?.maxTokens ?? modelInfo.maxTokens,
      temperature: options?.temperature ?? 1.0,
      top_p: options?.topP,
      stop: options?.stopSequences,
      messages: this.convertMessages(messages),
      tools: options?.tools ? this.convertTools(options.tools) : undefined,
      stream: true,
    });
    let fullContent = '';
    let inputTokens = 0;
    let outputTokens = 0;
    const toolCalls: ToolCall[] = [];
    let finishReason: Completion['finishReason'] = 'stop';
    // Tool-call deltas arrive fragmented across chunks, keyed by index;
    // assemble them here and finalize after the stream completes.
    const pendingToolCalls: Map<number, { id: string; name: string; arguments: string }> = new Map();
    for await (const chunk of response) {
      const choice = chunk.choices[0];
      if (!choice) continue;
      // Handle content delta
      if (choice.delta.content) {
        fullContent += choice.delta.content;
        options?.onToken?.(choice.delta.content);
        yield { type: 'text', text: choice.delta.content };
      }
      // Handle tool call fragments
      if (choice.delta.tool_calls) {
        for (const tc of choice.delta.tool_calls) {
          if (!pendingToolCalls.has(tc.index)) {
            pendingToolCalls.set(tc.index, { id: tc.id ?? '', name: '', arguments: '' });
          }
          const pending = pendingToolCalls.get(tc.index)!;
          if (tc.id) pending.id = tc.id;
          if (tc.function?.name) pending.name = tc.function.name;
          if (tc.function?.arguments) pending.arguments += tc.function.arguments;
        }
      }
      // Handle finish reason
      if (choice.finish_reason) {
        if (choice.finish_reason === 'tool_calls') finishReason = 'tool_use';
        else if (choice.finish_reason === 'length') finishReason = 'length';
      }
      // Handle usage (sent on the final chunk when available)
      if (chunk.usage) {
        inputTokens = chunk.usage.prompt_tokens;
        outputTokens = chunk.usage.completion_tokens;
      }
    }
    // Finalize tool calls: only those with an id, a name, and parseable
    // JSON arguments are emitted; malformed ones are silently dropped.
    for (const pending of pendingToolCalls.values()) {
      if (pending.id && pending.name) {
        try {
          const input = JSON.parse(pending.arguments || '{}');
          toolCalls.push({ id: pending.id, name: pending.name, input });
          yield { type: 'tool_use', toolUse: { id: pending.id, name: pending.name, input } };
        } catch {
          // Skip invalid JSON
        }
      }
    }
    return {
      content: fullContent,
      finishReason,
      usage: { inputTokens, outputTokens },
      toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    };
  }

  /**
   * Approximate the token count of a text (~4 characters per token).
   * OpenRouter exposes no tokenizer endpoint, so this is an estimate.
   */
  async countTokens(text: string): Promise<number> {
    return Math.ceil(text.length / 4);
  }

  /**
   * Get metadata for the configured model, falling back to conservative
   * defaults when the model is not present in MODEL_INFO.
   */
  getModel(): ModelInfo {
    return MODEL_INFO[this.model] ?? {
      id: this.model,
      name: this.model,
      maxTokens: 4096,
      contextWindow: 32000,
    };
  }

  /**
   * Check provider health by hitting the models endpoint.
   * Never throws; network/API failures report as unhealthy.
   */
  async isHealthy(): Promise<boolean> {
    try {
      const response = await fetch(`${this.config.baseUrl}/v1/models`, {
        headers: {
          'Authorization': `Bearer ${this.config.apiKey}`,
        },
      });
      return response.ok;
    } catch {
      return false;
    }
  }

  /**
   * List model ids available on OpenRouter.
   * Falls back to the locally known MODEL_INFO keys on any failure.
   */
  async listModels(): Promise<string[]> {
    try {
      const response = await fetch(`${this.config.baseUrl}/v1/models`, {
        headers: {
          'Authorization': `Bearer ${this.config.apiKey}`,
        },
      });
      if (!response.ok) return Object.keys(MODEL_INFO);
      const data = await response.json() as { data: Array<{ id: string }> };
      return data.data.map(m => m.id);
    } catch {
      return Object.keys(MODEL_INFO);
    }
  }

  // ==========================================================================
  // Private Methods
  // ==========================================================================

  /** Map internal Message objects to the OpenAI-style wire format. */
  private convertMessages(messages: Message[]): OpenRouterMessage[] {
    return messages.map(msg => ({
      role: msg.role,
      content: msg.content,
    }));
  }

  /** Map internal Tool definitions to OpenAI-style function tools. */
  private convertTools(tools: Tool[]): OpenRouterTool[] {
    return tools.map(tool => ({
      type: 'function' as const,
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters,
      },
    }));
  }

  /**
   * Convert a raw API response into a Completion.
   *
   * Tool-call arguments are parsed defensively: the streaming path already
   * skips invalid JSON, so the non-streaming path must not throw on it either.
   */
  private convertResponse(response: OpenRouterResponse): Completion {
    const choice = response.choices[0];
    const toolCalls: ToolCall[] = [];
    for (const tc of choice.message.tool_calls ?? []) {
      try {
        toolCalls.push({
          id: tc.id,
          name: tc.function.name,
          input: JSON.parse(tc.function.arguments || '{}'),
        });
      } catch {
        // Skip tool calls whose arguments are not valid JSON
      }
    }
    let finishReason: Completion['finishReason'] = 'stop';
    if (choice.finish_reason === 'length') finishReason = 'length';
    if (choice.finish_reason === 'tool_calls') finishReason = 'tool_use';
    return {
      content: choice.message.content ?? '',
      finishReason,
      usage: {
        inputTokens: response.usage.prompt_tokens,
        outputTokens: response.usage.completion_tokens,
      },
      toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    };
  }

  /** Build request headers, including optional OpenRouter attribution headers. */
  private buildHeaders(): Record<string, string> {
    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${this.config.apiKey}`,
    };
    if (this.config.siteUrl) {
      headers['HTTP-Referer'] = this.config.siteUrl;
    }
    if (this.config.siteName) {
      headers['X-Title'] = this.config.siteName;
    }
    return headers;
  }

  /**
   * POST a JSON body and parse the JSON response.
   * @throws Error with status and body text on non-2xx responses.
   */
  private async makeRequest<T>(endpoint: string, body: Record<string, unknown>): Promise<T> {
    const response = await fetch(`${this.config.baseUrl}${endpoint}`, {
      method: 'POST',
      headers: this.buildHeaders(),
      body: JSON.stringify(body),
      signal: AbortSignal.timeout(this.config.timeout),
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenRouter API error: ${response.status} - ${error}`);
    }
    return response.json() as Promise<T>;
  }

  /**
   * POST a JSON body and yield parsed server-sent-event chunks until [DONE].
   *
   * NOTE(review): unlike makeRequest, this request carries no timeout signal —
   * confirm whether that is intentional (a whole-stream AbortSignal.timeout
   * would abort long-running reasoning streams mid-flight).
   *
   * @throws Error on non-2xx responses or when the response has no body.
   */
  private async *makeStreamRequest(
    endpoint: string,
    body: Record<string, unknown>
  ): AsyncGenerator<OpenRouterStreamChunk, void, void> {
    const response = await fetch(`${this.config.baseUrl}${endpoint}`, {
      method: 'POST',
      headers: this.buildHeaders(),
      body: JSON.stringify(body),
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenRouter API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error('No response body');
    }
    const decoder = new TextDecoder();
    let buffer = '';
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        // SSE frames are newline-delimited; keep any trailing partial line.
        const lines = buffer.split('\n');
        buffer = lines.pop() ?? '';
        for (const line of lines) {
          if (line.startsWith('data: ')) {
            const data = line.slice(6);
            if (data === '[DONE]') return;
            try {
              yield JSON.parse(data) as OpenRouterStreamChunk;
            } catch {
              // Skip invalid JSON
            }
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }
}
// ============================================================================
// Factory Functions
// ============================================================================
/**
 * Construct an OpenRouterProvider from an explicit configuration.
 *
 * @param config - Connection settings including the API key.
 * @returns A ready-to-use provider instance.
 */
export function createOpenRouterProvider(config: OpenRouterConfig): OpenRouterProvider {
  const provider = new OpenRouterProvider(config);
  return provider;
}
/**
* Create a provider for Qwen QwQ reasoning model
*/
/**
 * Build a provider preconfigured for the Qwen QwQ reasoning model.
 *
 * @param apiKey - OpenRouter API key.
 * @param free - When true, select the free-tier model variant.
 */
export function createQwQProvider(apiKey: string, free: boolean = false): OpenRouterProvider {
  const model = free ? 'qwen/qwq-32b:free' : 'qwen/qwq-32b';
  return new OpenRouterProvider({ apiKey, model });
}
/**
* Create a provider for DeepSeek R1 reasoning model
*/
export function createDeepSeekR1Provider(apiKey: string): OpenRouterProvider {
return new OpenRouterProvider({
apiKey,
model: 'deepseek/deepseek-r1',
});
}
export default OpenRouterProvider;

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,MAAM,WAAW,gBAAgB;IAC/B,GAAG,EAAE,WAAW,CAAC;IACjB,SAAS,EAAE,iBAAiB,CAAC;CAC9B;AAED,MAAM,WAAW,WAAW;IAC1B,QAAQ,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,iBAAiB,GAAG,OAAO,CAAC,UAAU,CAAC,CAAC;IAChF,MAAM,CAAC,QAAQ,EAAE,OAAO,EAAE,EAAE,OAAO,CAAC,EAAE,aAAa,GAAG,cAAc,CAAC,KAAK,EAAE,UAAU,EAAE,IAAI,CAAC,CAAC;IAC9F,WAAW,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC;IAC3C,QAAQ,IAAI,SAAS,CAAC;IACtB,SAAS,IAAI,OAAO,CAAC,OAAO,CAAC,CAAC;CAC/B;AAED,MAAM,WAAW,OAAO;IACtB,IAAI,EAAE,MAAM,GAAG,WAAW,GAAG,QAAQ,CAAC;IACtC,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,iBAAiB;IAChC,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;IACzB,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;CAChB;AAED,MAAM,WAAW,aAAc,SAAQ,iBAAiB;IACtD,OAAO,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,CAAC;CACnC;AAED,MAAM,WAAW,UAAU;IACzB,OAAO,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,GAAG,QAAQ,GAAG,UAAU,CAAC;IAC7C,KAAK,EAAE;QACL,WAAW,EAAE,MAAM,CAAC;QACpB,YAAY,EAAE,MAAM,CAAC;KACtB,CAAC;IACF,SAAS,CAAC,EAAE,QAAQ,EAAE,CAAC;CACxB;AAED,MAAM,WAAW,KAAK;IACpB,IAAI,EAAE,MAAM,GAAG,UAAU,CAAC;IAC1B,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,OAAO,CAAC,EAAE,QAAQ,CAAC;CACpB;AAED,MAAM,WAAW,IAAI;IACnB,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACrC;AAED,MAAM,WAAW,QAAQ;IACvB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAChC;AAED,MAAM,WAAW,SAAS;IACxB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,MAAM,CAAC;IAClB,aAAa,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,iBAAiB;IAChC,KAAK,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;IAChD,WAAW,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,YAAY,CAAC,CAAC;IACjD,UAAU,IAAI,MAAM,CAAC;IACrB,KAAK,IAAI,MAAM,CAAC;CACjB;AAGD,OAAO,EACL,iBAAiB,EACjB,uBAAuB,EACvB,KAAK,eAAe,EACpB,KAAK,cAAc,GACpB,MAAM,wBAAwB,CAAC;AAEhC,OAAO,EACL,kBAAkB,EAClB,wBAAwB,EACxB,iBAAiB,EA
CjB,wBAAwB,EACxB,KAAK,gBAAgB,EACrB,KAAK,eAAe,GACrB,MAAM,yBAAyB,CAAC;AAEjC,OAAO,EACL,sBAAsB,EACtB,KAAK,cAAc,EACnB,KAAK,aAAa,GACnB,MAAM,uBAAuB,CAAC"}

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":";AAAA;;GAEG;;;AA0EH,2BAA2B;AAC3B,+DAKgC;AAJ9B,yHAAA,iBAAiB,OAAA;AACjB,+HAAA,uBAAuB,OAAA;AAKzB,iEAOiC;AAN/B,2HAAA,kBAAkB,OAAA;AAClB,iIAAA,wBAAwB,OAAA;AACxB,0HAAA,iBAAiB,OAAA;AACjB,iIAAA,wBAAwB,OAAA;AAK1B,6DAI+B;AAH7B,6HAAA,sBAAsB,OAAA"}

View File

@@ -0,0 +1,98 @@
/**
* AI Provider Integration - LLM and Embedding Providers
*/
/** Registry of the concrete AI providers an application wires together. */
export interface ProviderRegistry {
  llm: LLMProvider;
  embedding: EmbeddingProvider;
}
/** Contract implemented by every chat-completion LLM backend. */
export interface LLMProvider {
  /** Run a single non-streaming completion over the conversation. */
  complete(messages: Message[], options?: CompletionOptions): Promise<Completion>;
  /** Stream tokens as produced; the generator's return value is the final Completion. */
  stream(messages: Message[], options?: StreamOptions): AsyncGenerator<Token, Completion, void>;
  /** Count (possibly approximate) tokens in a piece of text. */
  countTokens(text: string): Promise<number>;
  /** Metadata for the currently configured model. */
  getModel(): ModelInfo;
  /** Cheap connectivity/credentials check against the provider. */
  isHealthy(): Promise<boolean>;
}
/** One chat message in a conversation. */
export interface Message {
  role: 'user' | 'assistant' | 'system';
  content: string;
}
/** Sampling and tooling options for completions. */
export interface CompletionOptions {
  maxTokens?: number;
  temperature?: number;
  topP?: number;
  /** Sequences at which generation stops. */
  stopSequences?: string[];
  /** Tool definitions made available to the model. */
  tools?: Tool[];
}
/** Streaming adds an optional per-token callback to CompletionOptions. */
export interface StreamOptions extends CompletionOptions {
  onToken?: (token: string) => void;
}
/** Result of a completed (or fully streamed) request. */
export interface Completion {
  content: string;
  /** 'tool_use' signals the model requested one or more tool invocations. */
  finishReason: 'stop' | 'length' | 'tool_use';
  usage: {
    inputTokens: number;
    outputTokens: number;
  };
  toolCalls?: ToolCall[];
}
/** One streamed item: either a text fragment or a completed tool call. */
export interface Token {
  type: 'text' | 'tool_use';
  text?: string;
  toolUse?: ToolCall;
}
/** A tool definition advertised to the model. */
export interface Tool {
  name: string;
  description: string;
  /** Parameter description object — presumably JSON Schema; confirm dialect. */
  parameters: Record<string, unknown>;
}
/** A tool invocation requested by the model. */
export interface ToolCall {
  id: string;
  name: string;
  input: Record<string, unknown>;
}
/** Static model metadata. */
export interface ModelInfo {
  id: string;
  name: string;
  maxTokens: number;
  contextWindow: number;
}
/** Contract implemented by text-embedding backends. */
export interface EmbeddingProvider {
  /** Embed a batch of texts into vectors. */
  embed(texts: string[]): Promise<Float32Array[]>;
  /** Embed a single text. */
  embedSingle(text: string): Promise<Float32Array>;
  /** Dimensionality of the returned vectors. */
  dimensions(): number;
  /** Identifier of the underlying embedding model. */
  model(): string;
}
// Provider implementations
export {
AnthropicProvider,
createAnthropicProvider,
type AnthropicConfig,
type AnthropicModel,
} from './AnthropicProvider.js';
export {
OpenRouterProvider,
createOpenRouterProvider,
createQwQProvider,
createDeepSeekR1Provider,
type OpenRouterConfig,
type OpenRouterModel,
} from './OpenRouterProvider.js';
export {
createGoogleAIProvider,
type GoogleAIConfig,
type GoogleAIModel,
} from './GoogleAIProvider.js';

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,MAAM,WAAW,gBAAgB;IAC/B,MAAM,EAAE,iBAAiB,CAAC;IAC1B,QAAQ,EAAE,mBAAmB,CAAC;IAC9B,MAAM,EAAE,YAAY,CAAC;CACtB;AAED,MAAM,WAAW,iBAAiB;IAChC,SAAS,CAAC,OAAO,EAAE,CAAC,KAAK,EAAE,iBAAiB,KAAK,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;IACtE,SAAS,CAAC,OAAO,EAAE,CAAC,KAAK,EAAE,iBAAiB,KAAK,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;IACtE,UAAU,CAAC,OAAO,EAAE,CAAC,KAAK,EAAE,kBAAkB,KAAK,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;CACzE;AAED,MAAM,WAAW,iBAAiB;IAChC,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,EAAE,EAAE,MAAM,CAAC;CACZ;AAED,MAAM,WAAW,iBAAkB,SAAQ,iBAAiB;IAC1D,eAAe,EAAE,MAAM,CAAC;CACzB;AAED,MAAM,WAAW,kBAAkB;IACjC,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,MAAM,CAAC;IACjB,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,OAAO,CAAC;CAChB;AAED,MAAM,WAAW,mBAAmB;IAClC,QAAQ,CAAC,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,cAAc,GAAG,IAAI,CAAC;CAC1D;AAED,MAAM,MAAM,cAAc,GAAG,CAAC,GAAG,EAAE,cAAc,KAAK,OAAO,CAAC,aAAa,CAAC,CAAC;AAE7E,MAAM,WAAW,cAAc;IAC7B,OAAO,EAAE,MAAM,CAAC;IAChB,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,MAAM,EAAE,MAAM,CAAC;IACf,WAAW,EAAE,MAAM,CAAC;CACrB;AAED,MAAM,WAAW,aAAa;IAC5B,MAAM,EAAE,OAAO,CAAC;IAChB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,MAAM,CAAC,EAAE,KAAK,EAAE,CAAC;CAClB;AAED,MAAM,WAAW,YAAY;IAC3B,OAAO,CAAC,IAAI,EAAE,MAAM,GAAG,YAAY,CAAC;IACpC,OAAO,IAAI,YAAY,CAAC;IACxB,OAAO,CAAC,GAAG,QAAQ,EAAE,MAAM,EAAE,GAAG,YAAY,CAAC;IAC7C,OAAO,CAAC,QAAQ,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,GAAG,YAAY,CAAC;IAC3D,KAAK,IAAI,KAAK,EAAE,CAAC;CAClB;AAED,MAAM,WAAW,KAAK;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CACxB;AAED,MAAM,WAAW,MAAM;IACrB,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;IACjB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,KAAK,CAAC,EAAE,SAAS,GAAG,QAAQ,CAAC;CAC9B"}

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":";AAAA;;GAEG"}

View File

@@ -0,0 +1,78 @@
/**
* Slack Integration - Events, Commands, Blocks
*/
/** Top-level Slack integration surface: events, slash commands, Block Kit. */
export interface SlackIntegration {
  events: SlackEventHandler;
  commands: SlackCommandHandler;
  blocks: BlockBuilder;
}
/** Registration points for inbound Slack events. */
export interface SlackEventHandler {
  /** Register a handler for incoming channel messages. */
  onMessage(handler: (event: SlackMessageEvent) => Promise<void>): void;
  /** Register a handler for messages that mention the bot/user. */
  onMention(handler: (event: SlackMentionEvent) => Promise<void>): void;
  /** Register a handler for reactions being added or removed (see `added`). */
  onReaction(handler: (event: SlackReactionEvent) => Promise<void>): void;
}
/** An incoming Slack message. */
export interface SlackMessageEvent {
  teamId: string;
  channelId: string;
  userId: string;
  text: string;
  /** Present when the message belongs to a thread (parent message ts). */
  threadTs?: string;
  /** Slack timestamp identifying this message within its channel. */
  ts: string;
}
/** A message event carrying the id of the mentioned user. */
export interface SlackMentionEvent extends SlackMessageEvent {
  mentionedUserId: string;
}
/** An emoji reaction added to or removed from a message. */
export interface SlackReactionEvent {
  teamId: string;
  channelId: string;
  userId: string;
  /** Emoji name, without colons. */
  reaction: string;
  /** Timestamp of the message that was reacted to. */
  itemTs: string;
  /** True when the reaction was added, false when it was removed. */
  added: boolean;
}
/** Registry for slash-command handlers. */
export interface SlackCommandHandler {
  register(command: string, handler: CommandHandler): void;
}
/** Handles one slash-command invocation. */
export type CommandHandler = (ctx: CommandContext) => Promise<CommandResult>;
/** Context delivered with a slash-command invocation. */
export interface CommandContext {
  command: string;
  /** Everything the user typed after the command itself. */
  text: string;
  userId: string;
  channelId: string;
  teamId: string;
  /** URL for posting delayed responses back to Slack. */
  responseUrl: string;
}
/** Reply to a slash command. */
export interface CommandResult {
  /** True to post in-channel; otherwise presumably ephemeral — confirm. */
  public: boolean;
  text?: string;
  blocks?: Block[];
}
/** Fluent builder for Slack Block Kit layouts. */
export interface BlockBuilder {
  section(text: string): BlockBuilder;
  divider(): BlockBuilder;
  context(...elements: string[]): BlockBuilder;
  actions(actionId: string, buttons: Button[]): BlockBuilder;
  /** Produce the accumulated block list. */
  build(): Block[];
}
/** A raw Block Kit block; extra fields vary by block type. */
export interface Block {
  type: string;
  [key: string]: unknown;
}
/** An interactive button element. */
export interface Button {
  text: string;
  actionId: string;
  value?: string;
  style?: 'primary' | 'danger';
}

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,MAAM,WAAW,cAAc;IAC7B,OAAO,EAAE,eAAe,CAAC;IACzB,QAAQ,EAAE,gBAAgB,CAAC;CAC5B;AAED,MAAM,WAAW,eAAe;IAC9B,QAAQ,CAAC,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,cAAc,GAAG,IAAI,CAAC;IACtD,MAAM,CAAC,OAAO,EAAE,cAAc,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC;CAC7D;AAED,MAAM,MAAM,cAAc,GAAG,CAAC,OAAO,EAAE,cAAc,KAAK,OAAO,CAAC,eAAe,CAAC,CAAC;AAEnF,MAAM,WAAW,cAAc;IAC7B,IAAI,EAAE,OAAO,CAAC;IACd,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAChC,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAC/B;AAED,MAAM,WAAW,eAAe;IAC9B,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,IAAI,CAAC,EAAE,OAAO,CAAC;CAChB;AAED,MAAM,WAAW,cAAc;IAC7B,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CACjC;AAED,MAAM,WAAW,gBAAgB;IAC/B,SAAS,CAAC,QAAQ,EAAE,eAAe,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC;IACtD,QAAQ,CAAC,SAAS,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,GAAG,OAAO,CAAC,cAAc,CAAC,CAAC;IACvE,KAAK,CAAC,UAAU,EAAE,MAAM,GAAG,OAAO,CAAC,cAAc,CAAC,CAAC;CACpD;AAED,MAAM,WAAW,eAAe;IAC9B,GAAG,EAAE,MAAM,CAAC;IACZ,MAAM,EAAE,MAAM,CAAC;IACf,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAClC;AAED,MAAM,WAAW,cAAc;IAC7B,OAAO,EAAE,OAAO,CAAC;IACjB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB"}

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":";AAAA;;GAEG"}

View File

@@ -0,0 +1,51 @@
/**
* Webhook Integration - Inbound and Outbound
*/
/** Combined inbound (receiving) and outbound (sending) webhook facilities. */
export interface WebhookManager {
  inbound: InboundWebhooks;
  outbound: OutboundWebhooks;
}
/** Receives webhook deliveries from external systems. */
export interface InboundWebhooks {
  /** Register a handler for requests arriving at `path`. */
  register(path: string, handler: WebhookHandler): void;
  /** Verify a request's signature; returns true when it checks out. */
  verify(request: WebhookRequest, signature: string): boolean;
}
/** Processes one inbound webhook delivery. */
export type WebhookHandler = (payload: WebhookPayload) => Promise<WebhookResponse>;
/** Parsed inbound webhook request. */
export interface WebhookPayload {
  /** Request body; shape depends on the sender, so narrow before use. */
  body: unknown;
  headers: Record<string, string>;
  query: Record<string, string>;
}
/** Response returned to the webhook sender. */
export interface WebhookResponse {
  /** HTTP status; the default when omitted is implementation-defined. */
  status?: number;
  body?: unknown;
}
/** Raw request material for signature verification (unparsed body). */
export interface WebhookRequest {
  body: string;
  headers: Record<string, string>;
}
/** Sends webhooks to configured external endpoints. */
export interface OutboundWebhooks {
  /** Register an endpoint; resolves to its webhook id — confirm against impl. */
  configure(endpoint: WebhookEndpoint): Promise<string>;
  /** Deliver a payload to a previously configured endpoint. */
  dispatch(webhookId: string, payload: unknown): Promise<DispatchResult>;
  /** Re-attempt a previous dispatch by its id. */
  retry(dispatchId: string): Promise<DispatchResult>;
}
/** Configuration for an outbound webhook destination. */
export interface WebhookEndpoint {
  url: string;
  /** Shared secret, presumably used to sign outgoing payloads — confirm. */
  secret: string;
  /** Event names this endpoint subscribes to. */
  events: string[];
  headers?: Record<string, string>;
}
/** Outcome of one delivery attempt. */
export interface DispatchResult {
  success: boolean;
  statusCode?: number;
  latencyMs?: number;
  error?: string;
}