Build a stateful agent workflow with explicit graph structure. This is what Tenzai's Bonzai agent uses.
Tenzai's Bonzai exploiter agent is built on LangGraph. Understanding its core concepts — state graphs, nodes, edges, and conditional routing — is essential for working on the agent codebase.
Build a LangGraph agent with the same tool-calling loop pattern used in production. Add graceful handling when max iterations are reached.
┌─────────────┐
│    START    │
└──────┬──────┘
       │
       ▼
┌─────────────┐
│ call_model  │◄─────────────┐
└──────┬──────┘              │
       │                     │
       ▼                     │
┌─────────────┐              │
│ has_tools?  │              │
└──────┬──────┘              │
       │                     │
  ┌────┴────┐                │
  │         │                │
  ▼         ▼                │
[YES]     [NO]               │
  │         │                │
  ▼         ▼                │
┌─────┐   ┌─────┐            │
│tools│   │ END │            │
└──┬──┘   └─────┘            │
   │                         │
   └─────────────────────────┘
from typing import Annotated
from langgraph.graph.message import add_messages
from pydantic import BaseModel
from langchain_core.messages import BaseMessage
class AgentState(BaseModel):
    """State shared by every node in the graph.

    Each node returns a partial update (a dict); LangGraph merges it into
    this state, applying any per-field reducers.
    """

    # add_messages is a reducer that appends new messages to existing ones
    # instead of replacing the list on each node update.
    messages: Annotated[list[BaseMessage], add_messages]
    # Count of call_model invocations so far (no reducer: last write wins).
    iteration: int = 0
from langchain_core.tools import tool
@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression.

    Supports numeric literals, + - * / // % ** and unary +/-. Returns the
    result as a string, or an "Error: ..." string on invalid input.

    SECURITY: the expression comes from the LLM, i.e. untrusted input.
    The original used eval(), which allows arbitrary code execution; this
    version walks a parsed AST and only permits arithmetic node types.
    """
    import ast
    import operator

    # Whitelist of AST operator types -> their arithmetic implementation.
    ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def _eval(node):
        # Recursively evaluate, rejecting anything outside pure arithmetic.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.operand))
        raise ValueError(f"unsupported expression element: {ast.dump(node)}")

    try:
        return str(_eval(ast.parse(expression, mode="eval")))
    except Exception as e:
        # Same error contract as before: report failures as a string so the
        # tool node never raises into the graph.
        return f"Error: {e}"
@tool
def read_file(path: str) -> str:
    """Read contents of a file.

    Returns the file's text, or a human-readable error string. Errors are
    returned (not raised) so the tool node never crashes the graph.
    """
    try:
        # Explicit encoding: the platform default varies and this tool's
        # output is fed back to the LLM, so decoding must be deterministic.
        with open(path, 'r', encoding='utf-8') as f:
            return f.read()
    except FileNotFoundError:
        return f"File not found: {path}"
    except (OSError, UnicodeDecodeError) as e:
        # Original only caught FileNotFoundError; permission errors,
        # directories, and non-UTF-8 content would raise into the graph.
        return f"Error reading {path}: {e}"
# Registry of tools exposed to the LLM; each @tool's name and docstring
# become the schema the model sees.
tools = [calculator, read_file]
from langchain_openai import ChatOpenAI
from langchain_core.messages import AIMessage
# Bind the tool schemas to the model so its responses may carry tool_calls.
model = ChatOpenAI(model="gpt-4").bind_tools(tools)
async def call_model(state: AgentState) -> dict:
    """Invoke the LLM on the conversation so far.

    Returns a partial state update: the model's reply (merged into
    ``messages`` by the add_messages reducer) plus a bumped iteration count.
    """
    reply = await model.ainvoke(state.messages)
    update = {"messages": [reply]}
    update["iteration"] = state.iteration + 1
    return update
def should_continue(state: AgentState) -> str:
    """Route after call_model.

    Returns "tools" when the latest message is an AI reply that requested
    tool calls, otherwise "end".
    """
    latest = state.messages[-1]
    wants_tools = isinstance(latest, AIMessage) and bool(latest.tool_calls)
    return "tools" if wants_tools else "end"
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
def create_workflow():
    """Assemble and compile the agent graph.

    Topology: START -> call_model -> (tools -> call_model)* -> END, with
    should_continue deciding after each model turn whether to run tools
    or finish.
    """
    builder = StateGraph(AgentState)

    # Nodes: the LLM step and a prebuilt executor for our tool list.
    builder.add_node("call_model", call_model)
    builder.add_node("tools", ToolNode(tools))

    # The graph always begins with a model call.
    builder.set_entry_point("call_model")

    # Route the model's output: tool requests loop through "tools",
    # a plain answer terminates the run.
    builder.add_conditional_edges(
        "call_model",
        should_continue,
        {"tools": "tools", "end": END},
    )
    # Tool results are always fed back to the model.
    builder.add_edge("tools", "call_model")

    return builder.compile()
# Compiled graph, built once at import time and reused for every run.
graph = create_workflow()
from langchain_core.messages import HumanMessage
from langgraph.errors import GraphRecursionError
async def run_agent(user_input: str, max_steps: int = 10):
    """Run the agent on a single user message.

    Streams node-by-node updates (printing each), and returns the content
    of the final message, or a fallback string if the recursion limit is
    hit before the agent finishes.

    BUGFIX: the original streamed the graph and then called ainvoke() on
    the same input to fetch the final state — executing the entire agent
    (every LLM call and tool call) twice. We now capture the final message
    directly from the stream.
    """
    initial_state = {
        "messages": [HumanMessage(content=user_input)],
        "iteration": 0,
    }
    config = {"recursion_limit": max_steps}
    last_content = None
    try:
        async for event in graph.astream(initial_state, config=config):
            for node_name, state_update in event.items():
                print(f"[{node_name}] {state_update}")
                # Each update carries only the node's delta; remember the
                # newest message so the last model reply is our answer.
                msgs = (state_update or {}).get("messages")
                if msgs:
                    last_content = msgs[-1].content
        return last_content
    except GraphRecursionError:
        return "Reached maximum iterations - task incomplete"
async def graceful_exit_on_max_steps(state: AgentState) -> str:
    """Ask the model to summarize its progress after the step budget runs out.

    Appends a wrap-up request to the conversation and returns the model's
    textual reply. (Not wired into the graph; invoke it manually when a
    run ends in GraphRecursionError.)
    """
    wrap_up_request = """You've reached the maximum number of steps.
Please summarize:
1. What you accomplished
2. What remains incomplete
3. Any recommendations for next steps"""
    conversation = state.messages + [HumanMessage(content=wrap_up_request)]
    summary = await model.ainvoke(conversation)
    return summary.content
Further reading:
- LangGraph official documentation — concepts and tutorials
- LangGraph quickstart — step-by-step introduction to building agents
- Official examples from the LangGraph repository
- Background reading on when to choose graph-based agents over simpler loops
The compiled graph alternates between its two nodes, call_model and tools, until the model stops requesting tool calls.