Interface BaseLanguageModelInterface<RunOutput, CallOptions>

Base interface implemented by all runnables, used for cross-compatibility between different versions of LangChain core. It should not change on patch releases.

interface BaseLanguageModelInterface<RunOutput, CallOptions> {
    // Keys that may be passed as call options when invoking the model.
    get callKeys(): string[];
    // Runs the model on a batch of inputs. The overloads differ only in error
    // handling: with returnExceptions: true, failed inputs resolve to Error
    // values instead of rejecting the whole batch.
    batch(inputs: BaseLanguageModelInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions?: false;
    }): Promise<RunOutput[]>;
    batch(inputs: BaseLanguageModelInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions: true;
    }): Promise<(Error | RunOutput)[]>;
    batch(inputs: BaseLanguageModelInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(Error | RunOutput)[]>;
    // Generates model output for a list of formatted prompt values. Passing
    // options as a string[] treats them as stop sequences.
    generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
    // Returns the name of the runnable, optionally with a suffix appended.
    getName(suffix?: string): string;
    // Estimates the number of tokens in the given message content.
    getNumTokens(content: MessageContent): Promise<number>;
    // Transforms a single input into a single output.
    invoke(input: BaseLanguageModelInput, options?: Partial<CallOptions>): Promise<RunOutput>;
    // Convenience method: takes a string prompt and returns a string completion.
    predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
    // Convenience method: takes chat messages and returns a response message.
    predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
    // Returns a serialized representation of the model.
    serialize(): SerializedLLM;
    // Streams the output for a single input as an iterable readable stream.
    stream(input: BaseLanguageModelInput, options?: Partial<CallOptions>): Promise<IterableReadableStreamInterface<RunOutput>>;
    // Consumes an async generator of inputs and yields outputs as they are produced.
    transform(generator: AsyncGenerator<BaseLanguageModelInput, any, unknown>, options: Partial<CallOptions>): AsyncGenerator<RunOutput, any, unknown>;
}

Type Parameters

    RunOutput: the type of value the model produces for a single input.
    CallOptions: the options object accepted by invoke, batch, stream, and transform.

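Examples

A minimal invoke/stream sketch. ChatOpenAI from @langchain/openai is used here only as an example implementer of this interface; any conforming model works the same way:

import { ChatOpenAI } from "@langchain/openai";

const model = new ChatOpenAI({ model: "gpt-4o-mini" });

// invoke: one input in, one output out.
const reply = await model.invoke("What is the capital of France?");

// stream: one input in, an async-iterable stream of output chunks out.
const stream = await model.stream("Tell me a short story.");
for await (const chunk of stream) {
    console.log(chunk);
}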
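The batch overloads in action, reusing the same assumed model as above:

// Default behavior: outputs are returned in input order, and any failure
// rejects the returned promise.
const answers = await model.batch(["Hi!", "Hello!"]);

// With returnExceptions: true, a failed input yields an Error in its slot
// instead of failing the whole batch.
const results = await model.batch(["Hi!", "Hello!"], undefined, {
    returnExceptions: true,
});
for (const result of results) {
    if (result instanceof Error) {
        console.error("call failed:", result.message);
    }
}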
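getNumTokens can be used to guard against overrunning a context window; a sketch with a hypothetical 4096-token limit (the limit is not part of this interface):

const prompt = "Summarize the following report: ...";
const tokenCount = await model.getNumTokens(prompt);
if (tokenCount > 4096) { // hypothetical context limit
    throw new Error(`Prompt too long: ${tokenCount} tokens`);
}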