/**
 * Options accepted by the V3 streaming runner.
 */
interface StreamingRunnerV3Options {
    /**
     * The streaming LLM call function. Must return an object of the shape
     * `{ stream: AsyncIterable<string>, data: Promise<any> }`.
     *
     * @param args - Request parameters forwarded to the model call
     *   (`input`, `model`, and optional `max_tokens`, `signal`,
     *   `temperature`, `text_format` for strict JSON-schema output).
     */
    streamingLlmCallFn: (args: {
        input: any[];
        max_tokens?: number;
        model: string;
        signal?: AbortSignal;
        temperature?: number;
        text_format?: {
            name: string;
            schema: Record<string, any>;
            strict: boolean;
            type: "json_schema";
        };
    }) => Promise<{
        data: Promise<any>;
        stream: AsyncIterable<string>;
    }>;
    /**
     * Log every raw chunk to the console (toggleable at runtime via
     * `runner.verbose`).
     */
    verbose?: boolean;
}

Properties

streamingLlmCallFn: ((args: {
    input: any[];
    max_tokens?: number;
    model: string;
    signal?: AbortSignal;
    temperature?: number;
    text_format?: {
        name: string;
        schema: Record<string, any>;
        strict: boolean;
        type: "json_schema";
    };
}) => Promise<{
    data: Promise<any>;
    stream: AsyncIterable<string>;
}>)

The streaming LLM call function. Must return an object of the shape `{ stream: AsyncIterable<string>, data: Promise<any> }`.

verbose?: boolean

Log every raw chunk to the console (toggleable at runtime via `runner.verbose`).