Agents - Complete Reference
Agent Constructor
from agents import Agent
agent = Agent(
# Required
name: str, # Agent identifier
# Instructions (choose one)
instructions: str | Callable, # System prompt or dynamic function
# Model Configuration
model: str = "gpt-4.1", # Model to use
model_settings: ModelSettings, # Temperature, top_p, etc.
# Capabilities
tools: list[Tool] = [], # Function tools
mcp_servers: list[MCPServer] = [], # MCP server tools
handoffs: list[Agent | Handoff], # Delegation targets
# Output
output_type: type[BaseModel], # Structured output schema
# Safety
input_guardrails: list[Guardrail],
output_guardrails: list[Guardrail],
# Advanced
reset_tool_choice: bool = True, # Prevent tool loops
hooks: AgentHooks, # Lifecycle callbacks
)
Dynamic Instructions
Instructions can be a string or a function for runtime customization:
def dynamic_instructions(
    context: RunContextWrapper[UserContext],
    agent: Agent[UserContext],
) -> str:
    """Build the system prompt at run time from the per-run user context."""
    # RunContextWrapper.context is the UserContext object the caller passed
    # to Runner.run(...); it is never shown to the LLM directly.
    user = context.context
    return f"""You are a financial assistant for {user.name}.
User ID: {user.user_id}
Premium status: {user.is_premium}
Help them manage their finances."""

# Passing the function itself (not a string) makes the SDK call it on
# every run to produce a fresh system prompt.
agent = Agent[UserContext](
    name="Finance Assistant",
    instructions=dynamic_instructions,
)
Context (Dependency Injection)
Context passes data to agents, tools, and handoffs without exposing it to the LLM:
from dataclasses import dataclass
from agents import Agent, Runner, RunContextWrapper, function_tool
@dataclass
class AppContext:
    """Run-scoped dependencies shared with tools and handoffs (not sent to the LLM)."""
    user_id: str                 # ID of the user this run acts on behalf of
    db_connection: DatabasePool  # project-defined pool; presumably async — confirm
    logger: Logger
# Tool receives context
# Tool receives context
@function_tool
async def get_user_data(ctx: RunContextWrapper[AppContext]) -> str:
    """Fetch user data."""
    # ctx.context is the AppContext instance supplied at Runner.run(...) time;
    # the LLM only sees the tool's name/schema, never these objects.
    db = ctx.context.db_connection
    user_id = ctx.context.user_id
    return await db.fetch_user(user_id)
# Agent typed with context
# The generic parameter ties the agent to the context type its tools expect.
agent = Agent[AppContext](
    name="DataAgent",
    instructions="Fetch and analyze user data.",
    tools=[get_user_data],
)

# Pass context at runtime
app_ctx = AppContext(
    user_id="user_123",
    db_connection=pool,   # e.g. a pool created at application startup
    logger=logger,
)
# The same context object is threaded through to every tool call in this run.
result = await Runner.run(agent, "Get my data", context=app_ctx)
ToolContext (Extended Context)
For tool-specific metadata:
from agents import function_tool
from agents.tool import ToolContext
@function_tool
def log_tool_call(ctx: ToolContext[AppContext], data: str) -> str:
    """Tool with access to call metadata."""
    # ToolContext extends RunContextWrapper with per-invocation metadata
    # about the specific tool call being executed.
    print(f"Tool: {ctx.tool_name}")
    print(f"Call ID: {ctx.tool_call_id}")
    print(f"Arguments: {ctx.tool_arguments}")
    return f"Processed: {data}"
Structured Output
Force the agent to return typed data:
from pydantic import BaseModel, Field
from agents import Agent
class TransactionAnalysis(BaseModel):
    """Schema the agent must emit instead of free-form text (via output_type)."""
    # Field descriptions are part of the JSON schema shown to the model.
    total_amount: float = Field(description="Sum of all transactions")
    category_breakdown: dict[str, float] = Field(description="Spending by category")
    insights: list[str] = Field(description="Key observations")
    risk_level: str = Field(description="low, medium, or high")
agent = Agent(
    name="Transaction Analyzer",
    instructions="Analyze the provided transactions.",
    output_type=TransactionAnalysis,  # forces structured output matching the schema
)
result = await Runner.run(agent, transactions_text)
# final_output is already parsed and validated into the declared output_type.
analysis: TransactionAnalysis = result.final_output
print(f"Total: {analysis.total_amount}")
print(f"Risk: {analysis.risk_level}")
Model Settings
from agents import Agent, ModelSettings
# FIX: Reasoning is defined in the OpenAI client library, not in agents.models.
from openai.types.shared import Reasoning

# Basic settings
agent = Agent(
    model="gpt-4.1",
    model_settings=ModelSettings(
        temperature=0.7,
        top_p=0.9,
        max_tokens=4096,
    ),
)

# GPT-5 with reasoning
agent = Agent(
    model="gpt-5.2",
    model_settings=ModelSettings(
        reasoning=Reasoning(effort="high"),  # reasoning effort level
        verbosity="low",                     # response-length hint
    ),
)

# Force specific tool
agent = Agent(
    tools=[get_weather, get_news],
    # tool_choice accepts "auto" / "required" / "none" or a specific tool name.
    model_settings=ModelSettings(tool_choice="get_weather"),
)
Tool Choice Options
| Value | Behavior |
|---|---|
| `"auto"` | Model decides whether to use tools |
| `"required"` | Must use at least one tool |
| `"none"` | Cannot use tools |
| `"tool_name"` | Must use the named tool |
Agent Cloning
Create variations of an agent:
base_agent = Agent(
    name="Base",
    instructions="You are helpful.",
    tools=[get_data],
)

# Clone with modifications — only the fields passed here are overridden.
# NOTE(review): clone() appears to be a shallow copy, so mutable fields
# like tools may be shared between clones — confirm against the SDK docs.
formal_agent = base_agent.clone(
    name="Formal",
    instructions="You are helpful. Always use formal language.",
)
casual_agent = base_agent.clone(
    name="Casual",
    instructions="You are helpful. Be casual and friendly.",
)
Lifecycle Hooks
Monitor agent execution:
from agents import Agent, AgentHooks
class MyHooks(AgentHooks):
    """Lifecycle callbacks invoked by the runner as the agent executes."""

    # FIX: the AgentHooks base class defines on_start/on_end — there are no
    # on_run_start/on_run_end hooks, so the misnamed overrides were silently
    # never called.
    async def on_start(self, context, agent):
        print(f"Starting run with {agent.name}")

    async def on_end(self, context, agent, output):
        # on_end receives the agent's final output directly (not a result object).
        print(f"Completed with: {str(output)[:100]}")

    async def on_tool_start(self, context, agent, tool):
        print(f"Calling tool: {tool.name}")

    async def on_tool_end(self, context, agent, tool, result):
        print(f"Tool returned: {result[:100]}")

agent = Agent(
    name="Monitored Agent",
    hooks=MyHooks(),
)
Using Non-OpenAI Models
LiteLLM Integration
pip install "openai-agents[litellm]"
from agents import Agent

# Anthropic Claude — the "litellm/" model prefix routes the request
# through LiteLLM to the named provider/model.
claude_agent = Agent(
    name="Claude Agent",
    model="litellm/anthropic/claude-3-5-sonnet-20240620",
    instructions="You are helpful.",
)

# Google Gemini
gemini_agent = Agent(
    name="Gemini Agent",
    model="litellm/gemini/gemini-2.5-flash-preview-04-17",
    instructions="You are helpful.",
)
Custom OpenAI Client
from openai import AsyncOpenAI
from agents import set_default_openai_client

# Use custom endpoint (Azure, local, etc.)
client = AsyncOpenAI(
    api_key="your-key",
    base_url="https://your-endpoint.com/v1",
)
# Subsequent runs use this client unless a different one is supplied per run.
set_default_openai_client(client)
Prompt Templates (OpenAI Platform)
Reference stored prompts:
agent = Agent(
    name="Templated Agent",
    # References a prompt stored on the OpenAI platform instead of inline
    # instructions; variables are substituted into the stored template.
    prompt={
        "id": "pmpt_abc123",   # prompt object ID from the dashboard
        "version": "1",
        "variables": {
            "tone": "professional",
            "domain": "finance",
        },
    },
)