框架选择与环境搭建
本章介绍主流 Agent 开发框架,帮助你选择最适合项目的方案。
框架全景
graph TB
A[Agent 框架生态] --> B[通用框架]
A --> C[专用框架]
A --> D[编排框架]
B --> B1[LangChain / LangGraph]
B --> B2[LlamaIndex]
C --> C1[AutoGPT - 自主 Agent]
C --> C2[CrewAI - 多 Agent]
C --> C3[Claude Code - 编码 Agent]
D --> D1[LangGraph - 图编排]
D --> D2[Prefect - 工作流]
style A fill:#e3f2fd,stroke:#1976d2,stroke-width:3px
框架对比
| 框架 | 定位 | 优势 | 适合场景 |
|---|---|---|---|
| LangChain | 通用 LLM 框架 | 生态丰富,文档完善 | 快速原型 |
| LangGraph | 图编排 Agent | 精细流程控制 | 复杂 Agent 工作流 |
| LlamaIndex | 数据框架 | RAG + Agent 结合 | 数据密集应用 |
| CrewAI | 多 Agent | 简单易用 | 多 Agent 协作 |
| AutoGPT | 自主 Agent | 全自主运行 | 实验探索 |
| 原生实现 | 自定义 | 完全掌控 | 特定需求项目 |
方案一:LangGraph Agent
LangGraph 是目前构建复杂 Agent 的最佳选择:
"""
使用 LangGraph 构建 Agent
"""
# pip install langgraph langchain-openai
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage
from typing import TypedDict, Annotated
import operator
# Shared state that flows through every node of the graph.
class AgentState(TypedDict):
    # The operator.add reducer makes LangGraph append each node's returned
    # messages to this list instead of replacing it.
    messages: Annotated[list, operator.add]
    next_step: str  # NOTE(review): never written in the visible code — confirm it is needed
# Initialize the LLM; temperature=0 keeps routing and answers deterministic.
llm = ChatOpenAI(model="gpt-4o", temperature=0)
def should_continue(state: AgentState) -> str:
    """Routing node: decide whether to run tools or finish.

    Returns "tools" when the most recent message carries pending tool
    calls, otherwise "end".
    """
    latest = state["messages"][-1]
    # Messages without a tool_calls attribute (or with an empty one) end the run.
    pending = bool(getattr(latest, "tool_calls", None))
    return "tools" if pending else "end"
def call_model(state: AgentState) -> dict:
    """LLM node: send the accumulated conversation to the model.

    Returns a partial state update; the operator.add reducer on
    AgentState appends the reply to the message list.
    """
    reply = llm.invoke(state["messages"])
    return {"messages": [reply]}
def call_tools(state: AgentState) -> dict:
    """Tool node: execute every tool call attached to the latest message.

    Produces one "tool"-role message per call so the LLM can observe the
    results on the next turn.
    """
    latest = state["messages"][-1]
    tool_messages = [
        {
            "role": "tool",
            # Placeholder execution — a real implementation would dispatch here.
            "content": f"工具 {call['name']} 执行成功",
            "tool_call_id": call["id"],
        }
        for call in latest.tool_calls
    ]
    return {"messages": tool_messages}
# Build the graph over the shared AgentState.
workflow = StateGraph(AgentState)
# Register nodes: "agent" calls the LLM, "tools" executes requested tool calls.
workflow.add_node("agent", call_model)
workflow.add_node("tools", call_tools)
# Execution starts at the LLM node.
workflow.set_entry_point("agent")
# Conditional routing: should_continue returns "tools" (loop) or "end" (stop).
workflow.add_conditional_edges("agent", should_continue, {"tools": "tools", "end": END})
# After tools run, control returns to the LLM so it can use the results.
workflow.add_edge("tools", "agent")
# Compile the graph into an invokable app.
app = workflow.compile()
# Run with an initial user message.
result = app.invoke({
    "messages": [HumanMessage(content="帮我分析一下 Python 的优势")],
})
print(result["messages"][-1].content)
方案二:原生 Python Agent
不依赖框架,用纯 Python 构建,最大灵活性:
"""
纯 Python Agent 框架(无外部依赖)
"""
from openai import OpenAI
from dataclasses import dataclass, field
import json
@dataclass
class AgentConfig:
    """Configuration knobs for PureAgent."""
    model: str = "gpt-4o"
    max_iterations: int = 10  # safety cap on the think/act loop
    temperature: float = 0
    system_prompt: str = "你是一个有用的AI助手,使用提供的工具完成任务。"
@dataclass
class Message:
    """Chat message record.

    NOTE(review): not referenced anywhere in the visible code — confirm
    whether it is still needed or was superseded by raw dicts in history.
    """
    role: str
    content: str
    tool_calls: list = field(default_factory=list)
    tool_call_id: str = ""
class PureAgent:
"""纯 Python Agent"""
def __init__(self, config: AgentConfig = None):
self.config = config or AgentConfig()
self.client = OpenAI()
self.tools: dict[str, dict] = {}
self.history: list[dict] = []
def tool(self, description: str, parameters: dict = None):
"""装饰器注册工具"""
def decorator(func):
self.tools[func.__name__] = {
"function": func,
"schema": {
"type": "function",
"function": {
"name": func.__name__,
"description": description,
"parameters": parameters or {
"type": "object",
"properties": {},
},
},
},
}
return func
return decorator
def run(self, task: str) -> str:
"""执行任务"""
self.history = [
{"role": "system", "content": self.config.system_prompt},
{"role": "user", "content": task},
]
tool_schemas = [t["schema"] for t in self.tools.values()]
for i in range(self.config.max_iterations):
response = self.client.chat.completions.create(
model=self.config.model,
messages=self.history,
tools=tool_schemas if tool_schemas else None,
temperature=self.config.temperature,
)
msg = response.choices[0].message
if not msg.tool_calls:
return msg.content
self.history.append(msg)
for tc in msg.tool_calls:
name = tc.function.name
args = json.loads(tc.function.arguments)
print(f" 🔧 {name}({args})")
try:
result = self.tools[name]["function"](**args)
except Exception as e:
result = f"错误: {e}"
self.history.append({
"role": "tool",
"tool_call_id": tc.id,
"content": str(result),
})
return "任务超时"
# ==================
# Usage example (requires a valid OPENAI_API_KEY)
# ==================
agent = PureAgent(
    AgentConfig(
        model="gpt-4o",
        system_prompt="你是一个数据分析助手。使用提供的工具帮用户分析数据。",
    )
)
@agent.tool(
    description="读取 CSV 文件并返回统计信息",
    parameters={
        "type": "object",
        "properties": {
            "filepath": {"type": "string", "description": "CSV 文件路径"},
        },
        "required": ["filepath"],
    },
)
def analyze_csv(filepath: str) -> str:
    """Stub tool: return canned summary statistics as a JSON string."""
    # Simulated analysis — a real implementation would read `filepath`.
    summary = {
        "rows": 1000,
        "columns": ["name", "age", "salary"],
        "avg_age": 32.5,
        "avg_salary": 85000,
    }
    return json.dumps(summary)
@agent.tool(
    description="生成数据可视化图表",
    parameters={
        "type": "object",
        "properties": {
            "chart_type": {"type": "string", "description": "图表类型"},
            "data": {"type": "string", "description": "数据描述"},
        },
        "required": ["chart_type", "data"],
    },
)
def create_chart(chart_type: str, data: str) -> str:
    """Stub tool: pretend to render a chart and return its file name."""
    return f"已生成 {chart_type} 图表: chart_{chart_type}.png"
# Kick off a demo run: the agent should call analyze_csv, then create_chart.
result = agent.run("分析 data.csv 文件,给出关键统计信息,并生成柱状图")
print(result)
环境搭建
# Create an isolated virtual environment
python -m venv agent-env
source agent-env/bin/activate  # Linux/Mac
# agent-env\Scripts\activate   # Windows
# Install core dependencies
pip install openai  # LLM API client
pip install langchain-openai  # LangChain OpenAI bindings
pip install langgraph  # Agent graph orchestration
# Optional dependencies
pip install chromadb  # vector store
pip install crewai  # multi-agent framework
pip install tavily-python  # web-search tool
本章小结
- LangGraph 适合需要复杂流程控制的 Agent 项目
- 原生 Python 实现适合高度定制化的需求
- 选择框架时要权衡灵活性和开发速度
- 建议从原生实现学起,理解原理后再使用框架
下一章:深入学习 Agent 的工具库设计与集成。