Skip to content

Commit 5a6f26b

Browse files
authored
feat(core): add namespace-based symbol branding for error class hierarchies (#10081)
1 parent 7be50a7 commit 5a6f26b

8 files changed

Lines changed: 823 additions & 93 deletions

File tree

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
---
2+
"@langchain/core": patch
3+
"@langchain/google": patch
4+
"@langchain/anthropic": patch
5+
"@langchain/openai": patch
6+
---
7+
8+
feat(core): add namespace-based symbol branding for error class hierarchies
9+
10+
Introduces a `createNamespace` utility for hierarchical symbol-based branding of class hierarchies.
11+
All LangChain error classes now use this pattern, replacing hand-rolled duck-type `isInstance` checks
12+
with reliable cross-realm `Symbol.for`-based identity.
13+
14+
- New `LangChainError` base class that all LangChain errors extend
15+
- New `createNamespace` / `Namespace` API in `@langchain/core/utils/namespace`
16+
- Refactored `ModelAbortError`, `ContextOverflowError` to use namespace branding
17+
- Added `ContextOverflowError.fromError()` static factory method
18+
- Deprecated `addLangChainErrorFields` in favor of `LangChainError` subclasses
19+
- Migrated Google provider errors (`GoogleError`, `ConfigurationError`, etc.) to namespace branding
20+
- Updated Anthropic and OpenAI providers to use `ContextOverflowError.fromError()`
Lines changed: 133 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
/* eslint-disable @typescript-eslint/no-explicit-any */
22

33
import type { AIMessageChunk } from "../messages/ai.js";
4+
import { ns as baseNs } from "../utils/namespace.js";
45

56
export type LangChainErrorCodes =
67
| "CONTEXT_OVERFLOW"
@@ -13,6 +14,7 @@ export type LangChainErrorCodes =
1314
| "OUTPUT_PARSING_FAILURE"
1415
| "MODEL_ABORTED";
1516

17+
/** @deprecated Subclass LangChainError instead */
1618
export function addLangChainErrorFields(
1719
error: any,
1820
lc_error_code: LangChainErrorCodes
@@ -22,75 +24,160 @@ export function addLangChainErrorFields(
2224
return error;
2325
}
2426

27+
/**
 * The error namespace for all LangChain errors.
 *
 * Derived as a sub-namespace of the core `baseNs` (from
 * `@langchain/core/utils/namespace`); each error class below brands itself
 * via `ns.brand(...)` so that identity checks do not depend on a single
 * copy of the class being loaded.
 */
export const ns = baseNs.sub("error");
29+
2530
/**
26-
* Error thrown when a model invocation is aborted via an AbortSignal.
27-
* Contains any partial output that was generated before the abort.
31+
* Base error class for all LangChain errors.
32+
*
33+
* All LangChain error classes should extend this class (directly or
34+
* indirectly). Use `LangChainError.isInstance(obj)` to check if an
35+
* object is any LangChain error.
36+
*
37+
* @example
38+
* ```typescript
39+
* try {
40+
* await model.invoke("hello");
41+
* } catch (error) {
42+
* if (LangChainError.isInstance(error)) {
43+
* console.log("Got a LangChain error:", error.message);
44+
* }
45+
* }
46+
* ```
2847
*/
29-
export class ModelAbortError extends Error {
30-
readonly name = "ModelAbortError";
48+
export class LangChainError extends ns.brand(Error) {
49+
readonly name: string = "LangChainError";
3150

32-
readonly lc_error_code = "MODEL_ABORTED";
51+
constructor(message?: string) {
52+
super(message);
53+
if (Error.captureStackTrace) {
54+
Error.captureStackTrace(this, this.constructor);
55+
}
56+
}
57+
}
58+
59+
/**
60+
* Error class representing an aborted model operation in LangChain.
61+
*
62+
* This error is thrown when a model operation (such as invocation, streaming, or batching)
63+
* is cancelled before it completes, commonly due to a user-initiated abort signal
64+
* (e.g., via an AbortController) or an upstream cancellation event.
65+
*
66+
* The ModelAbortError provides access to any partial output the model may have produced
67+
* before the operation was interrupted, which can be useful for resuming work, debugging,
68+
* or presenting incomplete results to users.
69+
*
70+
* @remarks
71+
* - The `partialOutput` field includes message content that was generated prior to the abort,
72+
* such as a partial AIMessageChunk.
73+
* - This error extends the {@link LangChainError} base class with the marker `"model-abort"`.
74+
*
75+
* @example
76+
* ```typescript
77+
* try {
78+
* await model.invoke(input, { signal: abortController.signal });
79+
* } catch (err) {
80+
* if (ModelAbortError.isInstance(err)) {
81+
* // Handle user cancellation, check err.partialOutput if needed
82+
* } else {
83+
* throw err;
84+
* }
85+
* }
86+
* ```
87+
*/
88+
export class ModelAbortError extends ns.brand(LangChainError, "model-abort") {
89+
readonly name = "ModelAbortError";
3390

3491
/**
35-
* The partial message output that was accumulated before the abort.
36-
* This allows callers to access whatever content was generated
37-
* before the operation was cancelled.
92+
* The partial message output that was produced before the operation was aborted.
93+
* This is typically an AIMessageChunk, or could be undefined if no output was available.
3894
*/
3995
readonly partialOutput?: AIMessageChunk;
4096

97+
/**
98+
* Constructs a new ModelAbortError instance.
99+
*
100+
* @param message - A human-readable message describing the abort event.
101+
* @param partialOutput - Any partial model output generated before the abort (optional).
102+
*/
41103
constructor(message: string, partialOutput?: AIMessageChunk) {
42104
super(message);
43105
this.partialOutput = partialOutput;
44-
// Maintains proper stack trace for where our error was thrown (only available on V8)
45-
if (Error.captureStackTrace) {
46-
Error.captureStackTrace(this, ModelAbortError);
47-
}
48-
}
49-
50-
/**
51-
* Type guard to check if an error is a ModelAbortError
52-
*/
53-
static isInstance(error: unknown): error is ModelAbortError {
54-
return (
55-
typeof error === "object" &&
56-
error !== null &&
57-
"name" in error &&
58-
error.name === "ModelAbortError" &&
59-
"lc_error_code" in error &&
60-
error.lc_error_code === "MODEL_ABORTED"
61-
);
62106
}
63107
}
64108

65109
/**
66-
* Error thrown when input exceeds the model's context limit.
110+
* Error class representing a context window overflow in a language model operation.
67111
*
68-
* This exception is raised by chat models when the input tokens exceed
69-
* the maximum context window supported by the model.
112+
* This error is thrown when the combined input to a language model (such as prompt tokens,
113+
* historical messages, and/or instructions) exceeds the maximum context window or token limit
114+
* that the model can process in a single request. Most models have defined upper limits for the number of
115+
* tokens or characters allowed in a context, and exceeding this limit will prevent
116+
* the operation from proceeding.
117+
*
118+
* The {@link ContextOverflowError} extends the {@link LangChainError} base class with
119+
* the marker `"context-overflow"`.
120+
*
121+
* @remarks
122+
* - Use this error to programmatically identify cases where a user request, prompt, or input
123+
* sequence is too long to be handled by the target model.
124+
* - Model providers and framework integrations should throw this error if they detect
125+
* a request cannot be processed due to its size.
126+
*
127+
* @example
128+
* ```typescript
129+
* try {
130+
* await model.invoke(veryLongInput);
131+
* } catch (err) {
132+
* if (ContextOverflowError.isInstance(err)) {
133+
* // Handle overflow, e.g., prompt user to shorten input or truncate text
134+
* console.warn("Model context overflow:", err.message);
135+
* } else {
136+
* throw err;
137+
* }
138+
* }
139+
* ```
70140
*/
71-
export class ContextOverflowError extends Error {
141+
export class ContextOverflowError extends ns.brand(
142+
LangChainError,
143+
"context-overflow"
144+
) {
72145
readonly name = "ContextOverflowError";
73146

74-
readonly lc_error_code = "CONTEXT_OVERFLOW";
147+
/**
148+
* The underlying error that caused this {@link ContextOverflowError}, if any.
149+
*
150+
* This property is optionally set when wrapping a lower-level error using {@link ContextOverflowError.fromError}.
151+
* It allows error handlers to access or inspect the original error that led to the context overflow.
152+
*/
153+
cause?: Error;
75154

76-
constructor(message: string, options?: ErrorOptions) {
77-
super(message, options);
78-
if (Error.captureStackTrace) {
79-
Error.captureStackTrace(this, ContextOverflowError);
80-
}
155+
constructor(message?: string) {
156+
super(message ?? "Input exceeded the model's context window.");
81157
}
82158

83159
/**
84-
* Type guard to check if an error is a ContextOverflowError
160+
* Creates a new {@link ContextOverflowError} instance from an existing error.
161+
*
162+
* This static utility copies the message from the provided error and
163+
* attaches the original error as the {@link ContextOverflowError.cause} property,
164+
* enabling error handlers to inspect or propagate the original failure.
165+
*
166+
* @param obj - The original error object causing the context overflow.
167+
* @returns A new {@link ContextOverflowError} instance with the original error set as its cause.
168+
*
169+
* @example
170+
* ```typescript
171+
* try {
172+
* await model.invoke(input);
173+
* } catch (err) {
174+
* throw ContextOverflowError.fromError(err);
175+
* }
176+
* ```
85177
*/
86-
static isInstance(error: unknown): error is ContextOverflowError {
87-
return (
88-
typeof error === "object" &&
89-
error !== null &&
90-
"name" in error &&
91-
error.name === "ContextOverflowError" &&
92-
"lc_error_code" in error &&
93-
error.lc_error_code === "CONTEXT_OVERFLOW"
94-
);
178+
static fromError(obj: Error): ContextOverflowError {
179+
const error = new ContextOverflowError(obj.message);
180+
error.cause = obj;
181+
return error;
95182
}
96183
}

0 commit comments

Comments
 (0)