@@ -0,0 +1,17 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out mock express server transactions
    if (event.transaction?.includes('/v1/messages') || event.transaction?.includes('/v1/chat/completions')) {
      return null;
    }
    return event;
  },
});
@@ -0,0 +1,17 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out mock express server transactions
    if (event.transaction?.includes('/v1/messages') || event.transaction?.includes('/v1/chat/completions')) {
      return null;
    }
    return event;
  },
});
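The beforeSendTransaction filter above is duplicated verbatim across the two instrumentation configs. If that duplication grows, a small factory could keep the intent in one place; a minimal sketch, where makeTransactionFilter is a hypothetical helper and not part of the SDK:

// Hypothetical helper: drop transactions whose name matches any given pattern
function makeTransactionFilter(...patterns) {
  return event => (patterns.some(pattern => event.transaction?.includes(pattern)) ? null : event);
}

// Usage in either config:
//   beforeSendTransaction: makeTransactionFilter('/v1/messages', '/v1/chat/completions'),

Returning null from beforeSendTransaction drops the transaction event entirely, which is why the mock servers' own express transactions never reach the logging transport.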
@@ -0,0 +1,110 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import { initChatModel } from 'langchain';

function startMockOpenAIServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/chat/completions', (req, res) => {
    const model = req.body.model;

    if (model === 'error-model') {
      res.status(404).json({
        error: {
          message: 'Model not found',
          type: 'invalid_request_error',
          param: null,
          code: 'model_not_found',
        },
      });
      return;
    }

    // Simulate OpenAI response
    res.json({
      id: 'chatcmpl-init-test-123',
      object: 'chat.completion',
      created: 1677652288,
      model: model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Hello from initChatModel!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 8,
        completion_tokens: 12,
        total_tokens: 20,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

async function run() {
  const server = await startMockOpenAIServer();
  const baseUrl = `http://localhost:${server.address().port}/v1`;

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    // Set OpenAI API key in environment
    process.env.OPENAI_API_KEY = 'mock-api-key';

    // Test 1: Initialize chat model using the unified API with a model string
    const model1 = await initChatModel('gpt-4o', {
      temperature: 0.7,
      maxTokens: 100,
      modelProvider: 'openai',
      configurableFields: ['model'],
      configuration: {
        baseURL: baseUrl,
      },
    });

    await model1.invoke('Tell me about LangChain');

    // Test 2: Initialize with a different model
    const model2 = await initChatModel('gpt-3.5-turbo', {
      temperature: 0.5,
      modelProvider: 'openai',
      configuration: {
        baseURL: baseUrl,
      },
    });

    await model2.invoke([
      { role: 'system', content: 'You are a helpful assistant' },
      { role: 'user', content: 'What is AI?' },
    ]);

    // Test 3: Error handling
    try {
      const errorModel = await initChatModel('error-model', {
        modelProvider: 'openai',
        configuration: {
          baseURL: baseUrl,
        },
      });
      await errorModel.invoke('This will fail');
    } catch {
      // Expected error
    }
  });

  await Sentry.flush(2000);

  server.close();
}

run();
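The configurableFields: ['model'] option in Test 1 makes the model name swappable per invocation rather than fixed at initialization. A minimal usage sketch, assuming LangChain's documented withConfig API; the 'gpt-4o-mini' model name is illustrative only:

// Swap the configurable 'model' field at call time instead of re-initializing
const swappedModel = model1.withConfig({ configurable: { model: 'gpt-4o-mini' } });
await swappedModel.invoke('Same pipeline, different model');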
@@ -0,0 +1,72 @@
import { ChatAnthropic } from '@langchain/anthropic';
import * as Sentry from '@sentry/node';
import express from 'express';

function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    const model = req.body.model;

    res.json({
      id: 'msg_truncation_test',
      type: 'message',
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Response to truncated messages',
        },
      ],
      model: model,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const model = new ChatAnthropic({
      model: 'claude-3-5-sonnet-20241022',
      apiKey: 'mock-api-key',
      clientOptions: {
        baseURL: baseUrl,
      },
    });

    const largeContent1 = 'A'.repeat(15000); // ~15KB
    const largeContent2 = 'B'.repeat(15000); // ~15KB
    const largeContent3 = 'C'.repeat(25000); // ~25KB (will be truncated)

    // Create one very large string that gets truncated to only include Cs
    await model.invoke(largeContent3 + largeContent2);

    // Create an array of messages that gets truncated to only include the
    // last message (the result should again contain only Cs)
    await model.invoke([
      { role: 'system', content: largeContent1 },
      { role: 'user', content: largeContent2 },
      { role: 'user', content: largeContent3 },
    ]);
  });

  await Sentry.flush(2000);

  server.close();
}

run();
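The two invocations above pin down the truncation semantics the comments describe: a single oversized string keeps its head (only Cs survive), and a message list drops the oldest entries first (only the final all-C message survives). A minimal sketch of that "newest messages win" idea, assuming a roughly 20 KB budget; truncateToBudget and BUDGET_BYTES are hypothetical names, not the SDK's actual implementation:

// Hypothetical sketch of tail-keeping truncation, not the SDK's code
const BUDGET_BYTES = 20_000; // illustrative budget only

function truncateToBudget(messages, budget = BUDGET_BYTES) {
  const kept = [];
  let used = 0;
  // Walk from the newest message backwards, dropping the oldest first
  for (let i = messages.length - 1; i >= 0; i--) {
    const size = messages[i].content.length;
    if (kept.length > 0 && used + size > budget) break;
    kept.unshift(messages[i]);
    used += size;
  }
  // A lone oversized message is kept but cut to the budget, preserving its head
  if (kept.length === 1 && kept[0].content.length > budget) {
    kept[0] = { ...kept[0], content: kept[0].content.slice(0, budget) };
  }
  return kept;
}

// Usage: only the final all-C message survives, trimmed to the budget
// truncateToBudget([
//   { role: 'system', content: 'A'.repeat(15000) },
//   { role: 'user', content: 'B'.repeat(15000) },
//   { role: 'user', content: 'C'.repeat(25000) },
// ]);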
@@ -0,0 +1,95 @@
import * as Sentry from '@sentry/node';
import express from 'express';

function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    res.json({
      id: 'msg_test123',
      type: 'message',
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Mock response from Anthropic!',
        },
      ],
      model: req.body.model,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

async function run() {
  const server = await startMockAnthropicServer();
  const baseURL = `http://localhost:${server.address().port}`;

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    // EDGE CASE: Import and instantiate the Anthropic client BEFORE LangChain
    // is imported. This simulates the timing issue where a user creates an
    // Anthropic client in one file before importing LangChain in another file.
    const { default: Anthropic } = await import('@anthropic-ai/sdk');
    const anthropicClient = new Anthropic({
      apiKey: 'mock-api-key',
      baseURL,
    });

    // Use the Anthropic client directly - this will be instrumented by the
    // Anthropic integration
    await anthropicClient.messages.create({
      model: 'claude-3-5-sonnet-20241022',
      messages: [{ role: 'user', content: 'Direct Anthropic call' }],
      temperature: 0.7,
      max_tokens: 100,
    });

    // NOW import LangChain - at this point it will mark Anthropic to be
    // skipped, but the client created above is already instrumented
    const { ChatAnthropic } = await import('@langchain/anthropic');

    // Create a LangChain model - this uses Anthropic under the hood
    const langchainModel = new ChatAnthropic({
      model: 'claude-3-5-sonnet-20241022',
      temperature: 0.7,
      maxTokens: 100,
      apiKey: 'mock-api-key',
      clientOptions: {
        baseURL,
      },
    });

    // Use LangChain - this will be instrumented by the LangChain integration
    await langchainModel.invoke('LangChain Anthropic call');

    // Create ANOTHER Anthropic client after LangChain was imported.
    // This one should NOT be instrumented (the skip mechanism works correctly).
    const anthropicClient2 = new Anthropic({
      apiKey: 'mock-api-key',
      baseURL,
    });

    await anthropicClient2.messages.create({
      model: 'claude-3-5-sonnet-20241022',
      messages: [{ role: 'user', content: 'Second direct Anthropic call' }],
      temperature: 0.7,
      max_tokens: 100,
    });
  });

  await Sentry.flush(2000);
  server.close();
}

run();
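The edge case above hinges on when the skip flag flips relative to client construction. A minimal sketch of that patch-once idea, assuming a module-level flag; markAnthropicAsSkipped and maybeInstrumentClient are hypothetical names, not Sentry's actual internals:

// Hypothetical sketch of the skip mechanism the comments describe
let skipAnthropicInstrumentation = false;

// Called when the LangChain integration loads, so Anthropic calls are not
// double-instrumented through both integrations
function markAnthropicAsSkipped() {
  skipAnthropicInstrumentation = true;
}

// Consulted at client construction time: clients created before the flag
// flips stay wrapped, clients created after it are left untouched
function maybeInstrumentClient(client, wrap) {
  return skipAnthropicInstrumentation ? client : wrap(client);
}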