LLM usage analytics — aggregated latency, error, token, and cost metrics for LLM invocations over a time range, with per-model and per-provider breakdowns.

/**
 * Aggregated usage analytics for LLM invocations over a reporting window.
 *
 * Naming conventions visible in the fields: `_ms` suffixes are milliseconds,
 * `_usd` is US dollars. Rate fields (`error_rate`, `slow_rate`) are
 * presumably fractions in [0, 1] of `total_invocations` — TODO confirm with
 * the producer of this payload.
 */
interface LLMAnalytics {
    // Mean latency across all invocations, in milliseconds.
    avg_latency_ms: number;
    // Per-model breakdown, keyed by model identifier.
    // NOTE(review): unlike the top level, per-model entries carry error_rate
    // but per-provider entries (below) do not — confirm that asymmetry is intended.
    by_model: Record<string, {
        avg_latency_ms: number;
        error_rate: number;
        invocations: number;
        tokens: number;
    }>;
    // Per-provider breakdown, keyed by provider name.
    by_provider: Record<string, {
        avg_latency_ms: number;
        invocations: number;
        tokens: number;
    }>;
    // Number of invocations that ended in an error.
    error_count: number;
    // Fraction of invocations that errored.
    error_rate: number;
    // Estimated spend for the window, in US dollars.
    estimated_cost_usd: number;
    // 50th-percentile latency, in milliseconds.
    median_latency_ms: number;
    // 95th-percentile latency, in milliseconds.
    p95_latency_ms: number;
    // 99th-percentile latency, in milliseconds.
    p99_latency_ms: number;
    // Count of invocations classified as slow; the slowness threshold is not
    // defined in this declaration — see the producer.
    slow_invocations: number;
    // Fraction of invocations classified as slow.
    slow_rate: number;
    // Window covered by these metrics. NOTE(review): numeric epoch timestamps;
    // units (seconds vs milliseconds) are not shown here — confirm.
    time_range: {
        end: number;
        start: number;
    };
    // Completion (output) tokens across all invocations.
    total_completion_tokens: number;
    // Total number of LLM calls in the window.
    total_invocations: number;
    // Prompt (input) tokens across all invocations.
    total_prompt_tokens: number;
    // Total token count. NOTE(review): presumably prompt + completion — confirm.
    total_tokens: number;
}

Properties

avg_latency_ms: number
by_model: Record<string, {
    avg_latency_ms: number;
    error_rate: number;
    invocations: number;
    tokens: number;
}>
by_provider: Record<string, {
    avg_latency_ms: number;
    invocations: number;
    tokens: number;
}>
error_count: number
error_rate: number
estimated_cost_usd: number
median_latency_ms: number
p95_latency_ms: number
p99_latency_ms: number
slow_invocations: number
slow_rate: number
time_range: {
    end: number;
    start: number;
}
total_completion_tokens: number
total_invocations: number
total_prompt_tokens: number
total_tokens: number