Integrations › Agent Frameworks
LangGraph Integration
Automatically trace LangGraph graph executions and node interactions.
The LangGraph integration captures traces from your LangGraph applications, including graph execution flow, individual node calls, and state transitions between nodes.
Quickstart
Install Dependencies
uv add langgraph judgeval langchain-openai
pip install langgraph judgeval langchain-openai
Initialize Integration
from judgeval.tracer import Tracer
from judgeval.integrations.langgraph import Langgraph

# Create the tracer for this project, then install the LangGraph
# instrumentation; after this, graph and node calls are traced automatically.
tracer = Tracer(project_name="langgraph_project")
Langgraph.initialize()
Add to Existing Code
Add these lines to your existing LangGraph application:
from langgraph.graph import StateGraph, START, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from typing import TypedDict, List
from judgeval.tracer import Tracer
from judgeval.integrations.langgraph import Langgraph
# Initialize tracing once, before any graph is built or compiled.
tracer = Tracer(project_name="langgraph-agent")
Langgraph.initialize()
class AgentState(TypedDict):
    """Shared state passed between graph nodes."""
    messages: List[dict]  # conversation history as {"role", "content"} dicts
    task: str             # task description the agent works on
    result: str           # status summary written by the node that ran last
def research_agent(state: AgentState) -> AgentState:
    """Research node: ask the LLM about the task and append its reply."""
    model = ChatOpenAI(model="gpt-5-mini")
    prompt = HumanMessage(content=f"Research: {state['task']}")
    reply = model.invoke([prompt])
    updated = dict(state)
    updated["messages"] = [*state["messages"], {"role": "assistant", "content": reply.content}]
    updated["result"] = f"Research completed for: {state['task']}"
    return updated
# Build a single-node graph: START -> research -> END.
graph = StateGraph(AgentState)
# Register the node before wiring edges that reference it — mirrors the
# multi-agent example below and avoids edge validation errors on langgraph
# versions that check edge endpoints eagerly.
graph.add_node("research", research_agent)
graph.add_edge(START, "research")
graph.add_edge("research", END)
workflow = graph.compile()

# Invoke with an initial state matching AgentState's fields.
result = workflow.invoke({
    "messages": [],
    "task": "Build a web scraper",
    "result": ""
})
print(result)
All graph executions and node calls are automatically traced.
Example: Multi-Agent Workflow
from judgeval.tracer import Tracer
from judgeval.integrations.langgraph import Langgraph
from langgraph.graph import StateGraph, START, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from typing import TypedDict, List
# Initialize tracing once, before any graph is built or compiled.
tracer = Tracer(project_name="multi_agent_workflow")
Langgraph.initialize()
class AgentState(TypedDict):
    """Shared state passed between graph nodes."""
    messages: List[dict]  # conversation history as {"role", "content"} dicts
    task: str             # task description the agents work on
    result: str           # status summary written by the node that ran last
def research_agent(state: AgentState) -> AgentState:
    """Research node: query the LLM about the task and append its answer."""
    chat = ChatOpenAI(model="gpt-5-mini")
    answer = chat.invoke([HumanMessage(content=f"Research: {state['task']}")])
    history = state["messages"] + [{"role": "assistant", "content": answer.content}]
    return {**state, "messages": history, "result": f"Research completed for: {state['task']}"}
def planning_agent(state: AgentState) -> AgentState:
    """Planning node: ask the LLM to draft a plan and append its reply."""
    chat = ChatOpenAI(model="gpt-5-mini")
    answer = chat.invoke([HumanMessage(content=f"Create plan for: {state['task']}")])
    history = state["messages"] + [{"role": "assistant", "content": answer.content}]
    return {**state, "messages": history, "result": f"Plan created for: {state['task']}"}
def execution_agent(state: AgentState) -> AgentState:
    """Execution node: have the LLM carry out the task and record its output."""
    llm = ChatOpenAI(model="gpt-5-mini")
    request = HumanMessage(content=f"Execute: {state['task']}")
    response = llm.invoke([request])
    return {
        **state,
        "messages": [*state["messages"], {"role": "assistant", "content": response.content}],
        "result": f"Task completed: {state['task']}",
    }
@tracer.observe(span_type="function")
def main():
    """Build and run the research -> planning -> execution pipeline."""
    builder = StateGraph(AgentState)
    # Register all three agent nodes, then wire them into a linear chain.
    for node_name, node_fn in (
        ("research", research_agent),
        ("planning", planning_agent),
        ("execution", execution_agent),
    ):
        builder.add_node(node_name, node_fn)
    builder.set_entry_point("research")
    builder.add_edge("research", "planning")
    builder.add_edge("planning", "execution")
    builder.add_edge("execution", END)

    app = builder.compile()
    output = app.invoke({
        "messages": [],
        "task": "Build a customer service bot",
        "result": ""
    })
    print(output)


if __name__ == "__main__":
    main()