git-subtree-dir: vendor/ruvector git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
42 lines
1.3 KiB
TypeScript
/**
 * INTERMEDIATE TUTORIAL: Multi-Model Comparison
 *
 * Compare multiple AI models (Gemini, Claude, GPT-4) to find the best
 * performer for your specific task. Includes benchmarking, cost tracking,
 * and performance metrics.
 *
 * What you'll learn:
 * - Running parallel model comparisons
 * - Benchmarking quality and speed
 * - Tracking costs per model
 * - Selecting the best model for production
 *
 * Prerequisites:
 * - Set API keys: GEMINI_API_KEY, ANTHROPIC_API_KEY, OPENAI_API_KEY
 * - npm install dspy.ts @ruvector/agentic-synth
 *
 * Run: npx tsx examples/intermediate/multi-model-comparison.ts
 */
import { Prediction } from 'dspy.ts';
|
|
interface ModelConfig {
|
|
name: string;
|
|
provider: string;
|
|
model: string;
|
|
apiKey: string;
|
|
costPer1kTokens: number;
|
|
capabilities: string[];
|
|
}
|
|
declare const models: ModelConfig[];
|
|
interface BenchmarkResult {
|
|
modelName: string;
|
|
qualityScore: number;
|
|
avgResponseTime: number;
|
|
estimatedCost: number;
|
|
successRate: number;
|
|
outputs: Prediction[];
|
|
errors: string[];
|
|
}
|
|
declare function benchmarkModel(config: ModelConfig): Promise<BenchmarkResult>;
|
|
declare function runComparison(): Promise<BenchmarkResult[]>;
|
|
export { runComparison, benchmarkModel, models };
|
|
//# sourceMappingURL=multi-model-comparison.d.ts.map
|