Agent Frameworks

Langgraph

Automatically trace Langgraph graph executions and node interactions.

The Langgraph integration captures traces from your Langgraph applications, including the graph execution flow, individual node calls, and state transitions between nodes.

Quickstart

Install Dependencies

uv add langgraph judgeval langchain-openai
pip install langgraph judgeval langchain-openai

Initialize Integration

setup.py
from judgeval.tracer import Tracer
from judgeval.integrations.langgraph import Langgraph

# Create the tracer first so the integration has somewhere to route traces.
tracer = Tracer(project_name="langgraph_project")
# Patch LangGraph so graph executions and node calls are captured automatically.
Langgraph.initialize()
Always initialize the Tracer before calling Langgraph.initialize() to ensure proper trace routing.

Add to Existing Code

Add these lines to your existing Langgraph application:

from langgraph.graph import StateGraph, START, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from typing import TypedDict, List
from judgeval.tracer import Tracer  
from judgeval.integrations.langgraph import Langgraph  

# Tracer must exist before Langgraph.initialize() so traces are routed correctly.
tracer = Tracer(project_name="langgraph-agent")  
Langgraph.initialize()  

class AgentState(TypedDict):
    """Shared state passed between graph nodes."""
    messages: List[dict]  # chat transcript as role/content dicts (see research_agent)
    task: str  # task description supplied by the caller
    result: str  # human-readable status string written by the node

def research_agent(state: AgentState) -> AgentState:
    """Run a research step for the current task and record the LLM reply."""
    task = state["task"]
    model = ChatOpenAI(model="gpt-5-mini")
    reply = model.invoke([HumanMessage(content=f"Research: {task}")])

    updated = dict(state)
    updated["messages"] = state["messages"] + [{"role": "assistant", "content": reply.content}]
    updated["result"] = f"Research completed for: {task}"
    return updated

# Wire up a single-node graph: START -> research -> END.
graph = StateGraph(AgentState)
# Register the node before referencing it in an edge; the original order only
# worked because LangGraph defers validation until compile().
graph.add_node("research", research_agent)
graph.add_edge(START, "research")
graph.add_edge("research", END)
workflow = graph.compile()

# Seed every field declared on AgentState; tracing happens automatically.
result = workflow.invoke({
    "messages": [],
    "task": "Build a web scraper",
    "result": ""
})
print(result)

All graph executions and node calls are automatically traced.

Example: Multi-Agent Workflow

multi_agent_example.py
from judgeval.tracer import Tracer
from judgeval.integrations.langgraph import Langgraph
from langgraph.graph import StateGraph, START, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from typing import TypedDict, List

# Initialize the tracer before the integration so spans are routed correctly.
tracer = Tracer(project_name="multi_agent_workflow")
Langgraph.initialize()

class AgentState(TypedDict):
    """State dict threaded through the research -> planning -> execution chain."""
    messages: List[dict]  # role/content message dicts appended by each agent
    task: str  # task description supplied by the caller
    result: str  # status string overwritten by each agent in turn

def research_agent(state: AgentState) -> AgentState:
    """Research the task with the LLM and append its answer to the transcript."""
    response = ChatOpenAI(model="gpt-5-mini").invoke(
        [HumanMessage(content=f"Research: {state['task']}")]
    )
    new_message = {"role": "assistant", "content": response.content}
    return {
        **state,
        "messages": [*state["messages"], new_message],
        "result": f"Research completed for: {state['task']}",
    }

def planning_agent(state: AgentState) -> AgentState:
    """Ask the LLM for a plan and record it in the shared state."""
    chat = ChatOpenAI(model="gpt-5-mini")
    prompt = HumanMessage(content=f"Create plan for: {state['task']}")
    answer = chat.invoke([prompt])

    next_state = dict(state)
    next_state["messages"] = state["messages"] + [{"role": "assistant", "content": answer.content}]
    next_state["result"] = f"Plan created for: {state['task']}"
    return next_state

def execution_agent(state: AgentState) -> AgentState:
    """Execute the task via the LLM and mark the state as completed."""
    model = ChatOpenAI(model="gpt-5-mini")
    completion = model.invoke([HumanMessage(content=f"Execute: {state['task']}")])
    transcript = state["messages"] + [
        {"role": "assistant", "content": completion.content}
    ]
    return {**state, "messages": transcript, "result": f"Task completed: {state['task']}"}

@tracer.observe(span_type="function")  # traces main() alongside the auto-traced graph
def main():
    """Build and run the research -> planning -> execution pipeline."""
    graph = StateGraph(AgentState)
    graph.add_node("research", research_agent)
    graph.add_node("planning", planning_agent)
    graph.add_node("execution", execution_agent)

    # Use the imported START sentinel (previously unused) instead of the
    # legacy set_entry_point(); the two are equivalent in LangGraph.
    graph.add_edge(START, "research")
    graph.add_edge("research", "planning")
    graph.add_edge("planning", "execution")
    graph.add_edge("execution", END)

    workflow = graph.compile()

    # Seed every field declared on AgentState.
    result = workflow.invoke({
        "messages": [],
        "task": "Build a customer service bot",
        "result": ""
    })
    print(result)

if __name__ == "__main__":
    # Run the workflow only when executed as a script, not on import.
    main()

Tracking Non-Langgraph Nodes: Use @tracer.observe() to track any function or method that's not part of your Langgraph workflow. This is especially useful for monitoring utility functions, API calls, or other operations that happen outside the graph execution but are part of your overall application flow.

complete_example.py
from langgraph.graph import StateGraph, START, END
from judgeval.tracer import Tracer

# One shared tracer for both the decorated helper and the auto-traced graph node.
tracer = Tracer(project_name="my_agent")

@tracer.observe(span_type="function")
def helper_function(data: str) -> str:
    """Utility outside the graph, tracked explicitly via @tracer.observe()."""
    return f"Processed: {data}"

def langgraph_node(state):
    """Graph node (auto-traced) that delegates to a decorated helper."""
    processed = helper_function(state["input"])
    return {"result": processed}

# Build and run the single-node workflow; node and helper are both traced.
builder = StateGraph(dict)
builder.add_node("process", langgraph_node)
builder.add_edge(START, "process")
builder.add_edge("process", END)

compiled = builder.compile()

output = compiled.invoke({"input": "Hello World"})
print(output["result"])  # Output: "Processed: Hello World"

Next Steps