CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/npm-langchain--core

Core LangChain.js abstractions and schemas for building applications with Large Language Models

Pending
Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Security — by Snyk

Pending

The risk profile of this skill

Overview
Eval results
Files

docs/language-models.md

Language Models

Abstract base classes and interfaces for language models and chat models with unified APIs. These abstractions enable any provider to implement LLM functionality that integrates seamlessly with the broader LangChain ecosystem.

Capabilities

Base Language Model

Abstract base class for all language model implementations.

/**
 * Abstract base class for all language models.
 *
 * Extends Runnable, so every model participates in the standard
 * invoke/batch/stream pipeline with BaseLanguageModelInput as input.
 *
 * @template RunOutput - Type of model output (typically string or BaseMessage)
 * @template CallOptions - Options for model invocation
 */
abstract class BaseLanguageModel<RunOutput = any, CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends Runnable<BaseLanguageModelInput, RunOutput, CallOptions> {
  /** Async caller for managing API calls (retries, concurrency) */
  caller: AsyncCaller;
  
  /**
   * Generate text from prompts.
   * NOTE(review): the bare `string[]` form of `options` is presumably a
   * stop-sequence shorthand — confirm against the implementation.
   */
  abstract generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
  
  /** Generate from PromptValue objects instead of raw strings */
  generatePrompt(promptValues: BasePromptValue[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
  
  /** Get number of tokens in text (used for context-window budgeting) */
  getNumTokens(text: string): Promise<number>;
  
  /** Predict single output from single input */
  predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
  
  /** Predict messages from text input */
  predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
  
  /** Get type identifier for serialization */
  _modelType(): string;
  
  /** Get model parameters for LangSmith tracing (see LangSmithParams) */
  getLsParams(options?: this["ParsedCallOptions"]): LangSmithParams;
}

Base Chat Model

Abstract base class for chat-based language models.

/**
 * Abstract base class for chat models.
 *
 * Operates on BaseMessage[] rather than raw strings. Subclasses need
 * only implement _generate; streaming support is opt-in via
 * _streamResponseChunks.
 *
 * @template CallOptions - Options for chat model calls
 * @template OutputMessageType - Type of output messages
 */
abstract class BaseChatModel<CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions, OutputMessageType extends BaseMessage = BaseMessage> extends BaseLanguageModel<OutputMessageType, CallOptions> {
  /** Bind tools to the chat model; returns a new model with the tools attached */
  bindTools(tools: BindToolsInput, kwargs?: Record<string, unknown>): BaseChatModel<CallOptions, OutputMessageType>;
  
  /** Enable structured output with schema validation (Zod schema or raw JSON schema) */
  withStructuredOutput<T>(outputSchema: z.ZodSchema<T> | Record<string, unknown>, config?: StructuredOutputConfig): Runnable<BaseMessage[], T>;
  
  /** Core generation method - must be implemented by subclasses */
  abstract _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
  
  /** Stream response chunks; override to provide native streaming */
  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  
  /** Get model identifier (constant literal type) */
  _modelType(): "base_chat_model";
  
  /** Get LangSmith tracing parameters */
  getLsParams(options?: this["ParsedCallOptions"]): LangSmithParams;
}

Usage Examples:

// Example chat model implementation
class MyChatModel extends BaseChatModel {
  async _generate(messages: BaseMessage[], options: any): Promise<ChatResult> {
    // Implementation specific logic
    const response = await this.callAPI(messages, options);
    return {
      generations: [{
        message: new AIMessage(response.content),
        generationInfo: response.metadata
      }]
    };
  }
}

// Using a chat model
const model = new MyChatModel();
// A conversation: a system instruction followed by the user's question
const messages = [
  new SystemMessage("You are a helpful assistant"),
  new HumanMessage("What is 2+2?")
];

// invoke() comes from the Runnable base — messages in, one message out
const result = await model.invoke(messages);
console.log(result.content); // AI response

// Bind tools to model — plain JSON-schema tool definitions are accepted
// (see BindToolsInput); bindTools returns a new model, leaving `model` untouched
const modelWithTools = model.bindTools([
  {
    name: "calculator",
    description: "Perform calculations",
    parameters: {
      type: "object",
      properties: {
        operation: { type: "string" },
        a: { type: "number" },
        b: { type: "number" }
      }
    }
  }
]);

Simple Chat Model

Simplified chat model base class for easier implementation.

/**
 * Simplified chat model base class.
 *
 * Implement _call to return a plain string; the provided _generate
 * wraps that string into a ChatResult for the framework.
 *
 * @template CallOptions - Call options type
 */
abstract class SimpleChatModel<CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions> extends BaseChatModel<CallOptions> {
  /** Simplified call method returning string */
  abstract _call(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
  
  /** Convert string response to chat result (provided; do not override) */
  async _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
}

Usage Examples:

// Simplified implementation
class MySimpleChatModel extends SimpleChatModel {
  // Omitting the optional trailing params (options, runManager) is
  // valid TypeScript when the implementation does not use them.
  async _call(messages: BaseMessage[]): Promise<string> {
    // Just return a string - framework handles the rest
    return "This is a simple response";
  }
}

Base LLM

Abstract base class for completion-style language models.

/**
 * Abstract base class for completion-style LLMs.
 *
 * Text-in/text-out counterpart of BaseChatModel: specializes
 * BaseLanguageModel with string output.
 *
 * @template CallOptions - Call options type
 */
abstract class BaseLLM<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseLanguageModel<string, CallOptions> {
  /** Generate completions from text prompts - must be implemented by subclasses */
  abstract _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
  
  /** Stream completion chunks; override to provide native streaming */
  _streamResponseChunks(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
  
  /** Get model type (constant literal type) */
  _modelType(): "base_llm";
}

LLM

Simplified LLM base class.

/**
 * Simplified LLM base class.
 *
 * Implement _call for a single prompt; the provided _generate maps it
 * over the prompt list and assembles the LLMResult.
 *
 * @template CallOptions - Call options type
 */
abstract class LLM<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseLLM<CallOptions> {
  /** Simplified call method: one prompt in, one completion string out */
  abstract _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
  
  /** Convert call result to generation result (provided; do not override) */
  async _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
}

Tool Binding

Tool Binding Input

/**
 * Input for binding tools to models — an array whose items may be
 * structured tool instances, raw provider tool schemas
 * (Record<string, unknown>), or Runnable-like tools.
 */
type BindToolsInput = (StructuredToolInterface | Record<string, unknown> | RunnableToolLike)[];

/** Provider-agnostic tool schema with JSON-schema-style parameters */
interface ToolDefinition {
  /** Unique tool name the model refers to when calling the tool */
  name: string;
  /** Human-readable description exposed to the model */
  description: string;
  /** JSON schema describing the tool's arguments */
  parameters: Record<string, unknown>;
}

/** Minimal Runnable-shaped tool: named and invocable with arbitrary input */
interface RunnableToolLike {
  /** Tool name */
  name: string;
  /** Optional description for the model */
  description?: string;
  /** Optional JSON schema for the tool's arguments */
  parameters?: Record<string, unknown>;
  /** Execute the tool with the given input */
  invoke(input: unknown): Promise<unknown>;
}

Structured Output Configuration

/** Configuration for withStructuredOutput */
interface StructuredOutputConfig {
  /** Method for structured output: provider function-calling vs JSON mode */
  method?: "function_calling" | "json_mode";
  /** Whether to include the raw model output alongside the parsed value */
  includeRaw?: boolean;
  /** JSON schema for validation */
  schema?: Record<string, unknown>;
}

Model Results

LLM Result

/** Result of a completion-style generation call */
interface LLMResult {
  /** Generated completions — one inner array per input prompt */
  generations: Generation[][];
  /** Token usage information and other provider metadata */
  llmOutput?: Record<string, unknown>;
  /** Run information for callbacks */
  run?: Run[];
}

/** A single generated completion */
interface Generation {
  /** Generated text */
  text: string;
  /** Additional generation info (e.g. finish reason) */
  generationInfo?: Record<string, unknown>;
}

/** Streaming chunk of a Generation; successive chunks merge via concat */
interface GenerationChunk extends Generation {
  /** Combine with another chunk, returning the merged chunk */
  concat(chunk: GenerationChunk): GenerationChunk;
}

Chat Result

/** Result of a chat generation call */
interface ChatResult {
  /** Generated chat completions */
  generations: ChatGeneration[];
  /** Model output metadata (token usage, etc.) */
  llmOutput?: Record<string, unknown>;
}

/** A single generated chat completion */
interface ChatGeneration {
  /** Generated message */
  message: BaseMessage;
  /** Additional generation info (e.g. finish reason) */
  generationInfo?: Record<string, unknown>;
}

/** Streaming chunk of a ChatGeneration; successive chunks merge via concat */
interface ChatGenerationChunk extends ChatGeneration {
  /** Generated message chunk (narrows the inherited message type) */
  message: BaseMessageChunk;
  /** Combine with another chunk, returning the merged chunk */
  concat(chunk: ChatGenerationChunk): ChatGenerationChunk;
}

Call Options

/** Options accepted by every language-model call */
interface BaseLanguageModelCallOptions extends RunnableConfig {
  /** Stop sequences for generation */
  stop?: string[];
  /** Timeout for model calls */
  timeout?: number;
  /** Additional model-specific options (broad index signature by design) */
  [key: string]: unknown;
}

/** Chat-specific call options: tools and the tool-choice strategy */
interface BaseChatModelCallOptions extends BaseLanguageModelCallOptions {
  /** Tool choice strategy */
  tool_choice?: ToolChoice;
  /** Tools available to the model */
  tools?: ToolDefinition[];
}

/** How the model may select tools; the object form forces one named function */
type ToolChoice = "auto" | "none" | "required" | { type: "function"; function: { name: string } };

Types

/**
 * Accepted model input: a raw prompt string or a message list.
 * NOTE(review): implementations may also accept PromptValue objects —
 * confirm against the runtime before relying on this alias.
 */
type BaseLanguageModelInput = string | BaseMessage[];

/** Metadata attached to LangSmith traces for a model run */
interface LangSmithParams {
  /** LangSmith run name */
  ls_name?: string;
  /** LangSmith run type */
  ls_type?: string;
  /** Provider name */
  ls_provider?: string;
  /** Model name */
  ls_model_name?: string;
  /** Model type */
  ls_model_type?: string;
  /** Temperature setting */
  ls_temperature?: number;
  /** Max tokens */
  ls_max_tokens?: number;
  /** Stop sequences */
  ls_stop?: string[];
}

/** A prompt renderable either as a plain string or as chat messages */
interface BasePromptValue {
  /** Render as a single prompt string (for completion models) */
  toString(): string;
  /** Render as a message list (for chat models) */
  toChatMessages(): BaseMessage[];
}

docs

agents.md

caches.md

callbacks.md

documents.md

embeddings.md

index.md

language-models.md

memory-storage.md

messages.md

output-parsers.md

prompts.md

retrievers.md

runnables.md

tools.md

vectorstores.md

tile.json