Track all agent runs, error rates, LLM calls, tokens used, and tool executions. Monitor traffic patterns and duration metrics across your AI-powered features.
AI Monitoring built for debugging
Track LLM calls, token usage, model costs, and tool execution. Debug AI agents with full context on prompts, responses, and errors.
0 developers signed up for Sentry last week
Select SDK
// sentry.server.config.ts
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [
Sentry.openAIIntegration({
recordInputs: true,
recordOutputs: true,
}),
],
});

// sentry.server.config.ts
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [
Sentry.anthropicAIIntegration({
recordInputs: true,
recordOutputs: true,
}),
],
});// sentry.server.config.ts
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [Sentry.googleGenerativeAIIntegration()],
});// sentry.server.config.ts
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [Sentry.langchainIntegration()],
});// sentry.server.config.ts
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [Sentry.langGraphIntegration()],
});// sentry.server.config.ts
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [Sentry.vercelAIIntegration()],
});

import sentry_sdk
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
)import sentry_sdk
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
)import sentry_sdk
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
)import sentry_sdk
from sentry_sdk.integrations.langchain import LangchainIntegration
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
integrations=[LangchainIntegration()],
)import sentry_sdk
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
)import sentry_sdk
from sentry_sdk.integrations.litellm import LiteLLMIntegration
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
integrations=[LiteLLMIntegration()],
)import sentry_sdk
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
)import sentry_sdk
from sentry_sdk.integrations.pydantic_ai import PydanticAIIntegration
sentry_sdk.init(
dsn="___DSN___",
traces_sample_rate=1.0,
send_default_pii=True,
integrations=[PydanticAIIntegration()],
)

const Sentry = require("@sentry/node");
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [
Sentry.openAIIntegration({
recordInputs: true,
recordOutputs: true,
}),
],
});const Sentry = require("@sentry/node");
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [
Sentry.anthropicAIIntegration({
recordInputs: true,
recordOutputs: true,
}),
],
});const Sentry = require("@sentry/node");
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [
Sentry.googleGenAIIntegration({
recordInputs: true,
recordOutputs: true,
}),
],
});const Sentry = require("@sentry/node");
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [Sentry.langchainIntegration()],
});const Sentry = require("@sentry/node");
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [Sentry.langGraphIntegration()],
});const Sentry = require("@sentry/node");
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
integrations: [Sentry.vercelAIIntegration()],
});

import * as Sentry from "@sentry/react";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/react";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/react";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/react";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/react";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});

import * as Sentry from "@sentry/browser";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/browser";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/browser";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/browser";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});import * as Sentry from "@sentry/browser";
Sentry.init({
dsn: "___DSN___",
tracesSampleRate: 1.0,
});

Select AI Provider
// app/api/chat/route.ts
import OpenAI from "openai";
const client = new OpenAI();
export async function POST(req: Request) {
const response = await client.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "Hello!" }],
});
return Response.json({ text: response.choices[0].message.content });
}// app/api/chat/route.ts
import Anthropic from "@anthropic-ai/sdk";
const client = new Anthropic();
export async function POST(req: Request) {
const response = await client.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Hello!" }],
});
return Response.json({ text: response.content[0].text });
}const { GoogleGenAI } = require("@google/genai");
const client = new GoogleGenAI({ apiKey: "..." });
const response = await client.models.generateContent({
model: "gemini-2.0-flash",
contents: "Hello!",
});// app/api/chat/route.ts
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({ model: "gpt-4o" });
export async function POST(req: Request) {
const response = await model.invoke("Hello!");
return Response.json({ text: response.content });
}const { StateGraph, END } = require("@langchain/langgraph");
const { ChatOpenAI } = require("@langchain/openai");
const model = new ChatOpenAI({ model: "gpt-4o" });
const graph = new StateGraph({ channels: {} });
// Define your graph nodes and edges
const app = graph.compile();
const result = await app.invoke({ input: "Hello!" });// app/api/chat/route.ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const result = await generateText({
model: openai("gpt-4o"),
prompt: "Hello!",
experimental_telemetry: {
isEnabled: true,
recordInputs: true,
recordOutputs: true,
},
});

from openai import OpenAI
client = OpenAI()
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello!"}]
)from anthropic import Anthropic
client = Anthropic()
response = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
messages=[{"role": "user", "content": "Hello!"}]
)from google.genai import Client
client = Client()
response = client.models.generate_content(
model="gemini-2.0-flash",
contents="Hello!"
)from langchain.chat_models import init_chat_model
model = init_chat_model("gpt-4o", model_provider="openai")
response = model.invoke("Hello!")from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4o")
graph = StateGraph(dict)
# Define your graph nodes and edges
app = graph.compile()
result = app.invoke({"input": "Hello!"})import litellm
response = litellm.completion(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello!"}]
)from agents import Agent, Runner
agent = Agent(
name="Assistant",
instructions="You are a helpful assistant.",
)
result = Runner.run_sync(agent, "Hello!")from pydantic_ai import Agent
agent = Agent("openai:gpt-4o")
result = agent.run_sync("Hello!")

const OpenAI = require("openai");
const client = new OpenAI();
const response = await client.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "Hello!" }],
});const Anthropic = require("@anthropic-ai/sdk");
const client = new Anthropic();
const response = await client.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Hello!" }],
});const { GoogleGenAI } = require("@google/genai");
const client = new GoogleGenAI({ apiKey: "..." });
const response = await client.models.generateContent({
model: "gemini-2.0-flash",
contents: "Hello!",
});const { ChatOpenAI } = require("@langchain/openai");
const model = new ChatOpenAI({ model: "gpt-4o" });
const response = await model.invoke("Hello!");const { StateGraph, END } = require("@langchain/langgraph");
const { ChatOpenAI } = require("@langchain/openai");
const model = new ChatOpenAI({ model: "gpt-4o" });
const graph = new StateGraph({ channels: {} });
// Define your graph nodes and edges
const app = graph.compile();
const result = await app.invoke({ input: "Hello!" });const { generateText } = require("ai");
const { openai } = require("@ai-sdk/openai");
const result = await generateText({
model: openai("gpt-4o"),
prompt: "Hello!",
experimental_telemetry: { isEnabled: true },
});

import * as Sentry from "@sentry/react";
import OpenAI from "openai";
const openai = new OpenAI({ apiKey: "...", dangerouslyAllowBrowser: true });
const client = Sentry.instrumentOpenAiClient(openai, {
recordInputs: true,
recordOutputs: true,
});
const response = await client.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "Hello!" }],
});import * as Sentry from "@sentry/react";
import Anthropic from "@anthropic-ai/sdk";
const anthropic = new Anthropic({ apiKey: "...", dangerouslyAllowBrowser: true });
const client = Sentry.instrumentAnthropicAiClient(anthropic, {
recordInputs: true,
recordOutputs: true,
});
const response = await client.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Hello!" }],
});import * as Sentry from "@sentry/react";
import { GoogleGenAI } from "@google/genai";
const genai = new GoogleGenAI({ apiKey: "..." });
const client = Sentry.instrumentGoogleGenerativeAiClient(genai, {
recordInputs: true,
recordOutputs: true,
});
const response = await client.models.generateContent({
model: "gemini-2.0-flash",
contents: "Hello!",
});import * as Sentry from "@sentry/react";
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({ model: "gpt-4o" });
const sentryHandler = Sentry.createLangChainCallbackHandler();
const response = await model.invoke("Hello!", {
callbacks: [sentryHandler],
});import * as Sentry from "@sentry/react";
import { StateGraph, END } from "@langchain/langgraph";
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({ model: "gpt-4o" });
const graph = new StateGraph({ channels: {} });
// Define your graph nodes and edges
const app = Sentry.instrumentLangGraphClient(graph.compile());
const result = await app.invoke({ input: "Hello!" });

import * as Sentry from "@sentry/browser";
import OpenAI from "openai";
const openai = new OpenAI({ apiKey: "...", dangerouslyAllowBrowser: true });
const client = Sentry.instrumentOpenAiClient(openai, {
recordInputs: true,
recordOutputs: true,
});
const response = await client.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "Hello!" }],
});import * as Sentry from "@sentry/browser";
import Anthropic from "@anthropic-ai/sdk";
const anthropic = new Anthropic({ apiKey: "...", dangerouslyAllowBrowser: true });
const client = Sentry.instrumentAnthropicAiClient(anthropic, {
recordInputs: true,
recordOutputs: true,
});
const response = await client.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Hello!" }],
});import * as Sentry from "@sentry/browser";
import { GoogleGenAI } from "@google/genai";
const genai = new GoogleGenAI({ apiKey: "..." });
const client = Sentry.instrumentGoogleGenerativeAiClient(genai, {
recordInputs: true,
recordOutputs: true,
});
const response = await client.models.generateContent({
model: "gemini-2.0-flash",
contents: "Hello!",
});import * as Sentry from "@sentry/browser";
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({ model: "gpt-4o" });
const sentryHandler = Sentry.createLangChainCallbackHandler();
const response = await model.invoke("Hello!", {
callbacks: [sentryHandler],
});import * as Sentry from "@sentry/browser";
import { StateGraph, END } from "@langchain/langgraph";
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({ model: "gpt-4o" });
const graph = new StateGraph({ channels: {} });
// Define your graph nodes and edges
const app = Sentry.instrumentLangGraphClient(graph.compile());
const result = await app.invoke({ input: "Hello!" });

Tolerated by 4 million developers.
- Nextdoor
- Instacart
- Atlassian
- Cisco Meraki
- Disney
- Riot Games
Monitor spending across models.
Compare costs across different models. See token usage breakdown by model, track input vs output tokens, and identify expensive operations.
Track agent tool calls and errors.
See which tools your agents call, their error rates, average duration, and P95 latency. Identify slow or failing tool executions before they impact users.
Debug with full context.
Dive into individual requests with full prompt and response context. See AI spans with agent invocations, tool executions, token counts, costs, and timing.
AI Monitoring FAQs
Sentry supports automatic instrumentation for OpenAI, Anthropic, LangChain, Vercel AI SDK, Google Gen AI, and more. Python SDKs auto-instrument when the library is detected. JavaScript SDKs require adding the integration to Sentry.init.
Sentry captures token usage (input, output, cached, reasoning), model name, latency, costs, tool calls, and optionally prompts and responses. You can control prompt/response capture with
recordInputs and recordOutputs options.

For Python, set send_default_pii=True in sentry_sdk.init(). For JavaScript, set recordInputs: true and recordOutputs: true in the integration config. Learn more about privacy controls.

Yes. Sentry captures full agent traces including invoke_agent, execute_tool, and handoff spans. You can see the complete execution flow, which tools were called, their inputs/outputs, and timing. Learn more about agent monitoring.
Start monitoring your AI agents
Track LLM calls, token usage, and tool execution with full debugging context.