buildResponseMetadata.ts
@@ -1,6 +1,7 @@
import type { EngineResponse } from 'n8n-workflow';

import { buildSteps } from '@utils/agent-execution';
import type { TokenUsageData } from '@utils/agent-execution/AgentTokenTracker';

import type { RequestResponseMetadata } from '../types';

@@ -12,18 +13,46 @@ import type { RequestResponseMetadata } from '../types';
*
* @param response - The optional engine response from previous tool execution
* @param itemIndex - The current item index being processed
* @returns Metadata object with previousRequests and iterationCount
* @param currentTokens - Optional token usage from the current iteration
* @returns Metadata object with previousRequests, iterationCount, and accumulated tokens
*
*/
export function buildResponseMetadata(
response: EngineResponse<RequestResponseMetadata> | undefined,
itemIndex: number,
currentTokens?: TokenUsageData,
): RequestResponseMetadata {
const currentIterationCount = response?.metadata?.iterationCount ?? 0;
const previousRequests = buildSteps(response, itemIndex);

// Merge current iteration tokens with previous accumulated tokens
let accumulatedTokens: TokenUsageData | undefined;
if (currentTokens || response?.metadata?.accumulatedTokens) {
const prev = response?.metadata?.accumulatedTokens ?? {
promptTokens: 0,
completionTokens: 0,
totalTokens: 0,
isEstimate: false,
};
const curr = currentTokens ?? {
promptTokens: 0,
completionTokens: 0,
totalTokens: 0,
isEstimate: false,
};

accumulatedTokens = {
promptTokens: prev.promptTokens + curr.promptTokens,
completionTokens: prev.completionTokens + curr.completionTokens,
totalTokens: prev.totalTokens + curr.totalTokens,
isEstimate: prev.isEstimate || curr.isEstimate,
};
}

return {
previousRequests: buildSteps(response, itemIndex),
itemIndex,
previousRequests,
iterationCount: currentIterationCount + 1,
accumulatedTokens,
};
}
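For illustration only, not part of the diff: a sketch of how the accumulation behaves across two iterations, with made-up token counts; the partial EngineResponse is cast purely for the example.

// Iteration 1: no previous engine response; this call records 100 prompt + 20 completion tokens.
const first = buildResponseMetadata(undefined, 0, {
	promptTokens: 100,
	completionTokens: 20,
	totalTokens: 120,
	isEstimate: false,
});
// first.iterationCount === 1
// first.accumulatedTokens === { promptTokens: 100, completionTokens: 20, totalTokens: 120, isEstimate: false }

// Iteration 2: the previous metadata comes back on the engine response,
// and this iteration adds 80 prompt + 40 completion tokens (estimated).
const previous = { metadata: first } as unknown as EngineResponse<RequestResponseMetadata>;
const second = buildResponseMetadata(previous, 0, {
	promptTokens: 80,
	completionTokens: 40,
	totalTokens: 120,
	isEstimate: true,
});
// second.iterationCount === 2
// second.accumulatedTokens === { promptTokens: 180, completionTokens: 60, totalTokens: 240, isEstimate: true }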
prepareItemContext.ts
@@ -4,6 +4,7 @@ import { NodeOperationError } from 'n8n-workflow';
import type { IExecuteFunctions, ISupplyDataFunctions, EngineResponse } from 'n8n-workflow';

import { buildSteps, type ToolCallData } from '@utils/agent-execution';
import { AgentTokenTracker } from '@utils/agent-execution/AgentTokenTracker';
import { getPromptInputByType } from '@utils/helpers';
import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';
@@ -22,6 +23,7 @@ export type ItemContext = {
prompt: ChatPromptTemplate;
options: AgentOptions;
outputParser: N8nOutputParser | undefined;
tokenTracker: AgentTokenTracker;
};

/**
@@ -66,6 +68,9 @@ export async function prepareItemContext(
});
const prompt: ChatPromptTemplate = preparePrompt(messages);

// Create token tracker for this item
const tokenTracker = new AgentTokenTracker();

return {
itemIndex,
input,
@@ -74,5 +79,6 @@
prompt,
options,
outputParser,
tokenTracker,
};
}
runAgent.ts
@@ -14,13 +14,59 @@ import {
createEngineRequests,
saveToMemory,
} from '@utils/agent-execution';
import { modelPricingService } from '@utils/modelPricing';

import { SYSTEM_MESSAGE } from '../../prompt';
import type { AgentResult, RequestResponseMetadata } from '../types';
import { buildResponseMetadata } from './buildResponseMetadata';
import type { ItemContext } from './prepareItemContext';

type RunAgentResult = AgentResult | EngineRequest<RequestResponseMetadata>;

/**
* Helper function to add cost calculation to token usage
*/
async function addCostToTokenUsage(
tokenUsage: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
isEstimate: boolean;
},
model: BaseChatModel,
): Promise<typeof tokenUsage & { estimatedCost?: number | null; modelName?: string }> {
// Get model name from the model instance
// Try multiple properties as different LLM providers use different naming
const modelAny = model as any;
@cubic-dev-ai cubic-dev-ai bot (Contributor) commented on Nov 25, 2025

Rule violated: Prefer Typeguards over Type casting

Casting model to any to read metadata bypasses the “Prefer Typeguards over Type casting” rule. Please add explicit typing (e.g., intersecting BaseChatModel with the optional metadata properties) or a type guard instead of erasing the type safety.

Prompt for AI agents
Address the following comment on packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/runAgent.ts at line 40:

<comment>Casting `model` to `any` to read metadata bypasses the “Prefer Typeguards over Type casting” rule. Please add explicit typing (e.g., intersecting `BaseChatModel` with the optional metadata properties) or a type guard instead of erasing the type safety.</comment>

<file context>
@@ -14,13 +14,59 @@ import {
+): Promise<typeof tokenUsage & { estimatedCost?: number | null; modelName?: string }> {
+	// Get model name from the model instance
+	// Try multiple properties as different LLM providers use different naming
+	const modelAny = model as any;
+
+	const modelNameRaw =
</file context>
Suggested change
const modelAny = model as any;
const modelAny: BaseChatModel & Partial<{ modelName: unknown; model: unknown; name: unknown; _modelName: unknown; caller: { model?: unknown }; client: { modelName?: unknown }; _modelType: () => unknown }> = model;


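For reference, a minimal sketch of the intersection-typed approach the suggestion describes, limited to the flat string-valued lookups above (the nested caller/client/_modelType fallbacks are omitted); ModelNameHints and resolveModelName are illustrative names, not existing code:

import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

// Illustrative shape for the optional, provider-specific name properties probed above;
// not an official LangChain interface.
type ModelNameHints = Partial<{
	modelName: unknown;
	model: unknown;
	name: unknown;
	_modelName: unknown;
}>;

// Resolves a display name without erasing type information via `as any`.
function resolveModelName(model: BaseChatModel & ModelNameHints): string {
	const raw = model.modelName ?? model.model ?? model.name ?? model._modelName;
	return typeof raw === 'string' && raw.length > 0 ? raw : 'unknown';
}

A call such as resolveModelName(model) inside addCostToTokenUsage would then resolve the model name for modelPricingService.calculateCost without the `as any` cast, at the cost of the nested fallbacks.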
const modelNameRaw =
modelAny.modelName || // Most common (OpenAI, Anthropic, etc.)
modelAny.model || // Alternative property
modelAny.name || // Some providers
modelAny._modelName || // Private property fallback
modelAny.caller?.model || // Nested in caller
modelAny.client?.modelName || // Nested in client
modelAny._modelType?.() || // Method fallback
'unknown';

// Ensure modelName is a string
const modelName =
typeof modelNameRaw === 'string' ? modelNameRaw : String(modelNameRaw || 'unknown');

// Calculate cost
const estimatedCost = await modelPricingService.calculateCost(
modelName,
tokenUsage.promptTokens,
tokenUsage.completionTokens,
);

return {
...tokenUsage,
estimatedCost,
modelName,
};
}

/**
* Runs the agent for a single item, choosing between streaming or non-streaming execution.
* Handles both regular execution and execution after tool calls.
@@ -50,7 +96,10 @@ export async function runAgent(
formatting_instructions:
'IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. Only use this tool once you are ready to provide your final answer.',
};
const executeOptions = { signal: ctx.getExecutionCancelSignal() };
const executeOptions = {
signal: ctx.getExecutionCancelSignal(),
callbacks: [itemContext.tokenTracker],
};

// Check if streaming is actually available
const isStreamingAvailable = 'isStreaming' in ctx ? ctx.isStreaming?.() : undefined;
@@ -85,22 +134,42 @@
// If result contains tool calls, build the request object like the normal flow
if (result.toolCalls && result.toolCalls.length > 0) {
const actions = await createEngineRequests(result.toolCalls, itemIndex, tools);
const currentTokens = itemContext.tokenTracker.getAccumulatedTokens();

return {
actions,
metadata: buildResponseMetadata(response, itemIndex),
metadata: buildResponseMetadata(response, itemIndex, currentTokens),
};
}

// Add token usage to final streaming result
const currentTokens = itemContext.tokenTracker.getAccumulatedTokens();
// Merge with previous accumulated tokens if this is a continuation
if (response?.metadata?.accumulatedTokens) {
const mergedTokens = {
promptTokens: response.metadata.accumulatedTokens.promptTokens + currentTokens.promptTokens,
completionTokens:
response.metadata.accumulatedTokens.completionTokens + currentTokens.completionTokens,
totalTokens: response.metadata.accumulatedTokens.totalTokens + currentTokens.totalTokens,
isEstimate: response.metadata.accumulatedTokens.isEstimate || currentTokens.isEstimate,
};
result.tokenUsage = await addCostToTokenUsage(mergedTokens, model);
} else if (currentTokens.totalTokens > 0) {
result.tokenUsage = await addCostToTokenUsage(currentTokens, model);
}

return result;
} else {
// Handle regular execution
const chatHistory = await loadMemory(memory, model, options.maxTokensFromMemory);

const modelResponse = await executor.invoke({
...invokeParams,
chat_history: chatHistory,
});
const modelResponse = await executor.invoke(
{
...invokeParams,
chat_history: chatHistory,
},
executeOptions,
);

if ('returnValues' in modelResponse) {
// Save conversation to memory including any tool call context
@@ -126,15 +195,32 @@
if (options.returnIntermediateSteps && steps.length > 0) {
result.intermediateSteps = steps;
}
// Add token usage to final result
const currentTokens = itemContext.tokenTracker.getAccumulatedTokens();
// Merge with previous accumulated tokens if this is a continuation
if (response?.metadata?.accumulatedTokens) {
const mergedTokens = {
promptTokens:
response.metadata.accumulatedTokens.promptTokens + currentTokens.promptTokens,
completionTokens:
response.metadata.accumulatedTokens.completionTokens + currentTokens.completionTokens,
totalTokens: response.metadata.accumulatedTokens.totalTokens + currentTokens.totalTokens,
isEstimate: response.metadata.accumulatedTokens.isEstimate || currentTokens.isEstimate,
};
result.tokenUsage = await addCostToTokenUsage(mergedTokens, model);
} else if (currentTokens.totalTokens > 0) {
result.tokenUsage = await addCostToTokenUsage(currentTokens, model);
}
return result;
}

// If response contains tool calls, we need to return this in the right format
const actions = await createEngineRequests(modelResponse, itemIndex, tools);
const currentTokens = itemContext.tokenTracker.getAccumulatedTokens();

return {
actions,
metadata: buildResponseMetadata(response, itemIndex),
metadata: buildResponseMetadata(response, itemIndex, currentTokens),
};
}
}