自我反思与改进
High Contrast
Dark Mode
Light Mode
Sepia
Forest
2 min read · 436 words

自我反思与改进

Reflexion 是一种让 Agent 自省并从错误中学习的架构,显著提升 Agent 完成复杂任务的成功率。

Reflexion 原理

graph TB A[接收任务] --> B[生成初始方案] B --> C[执行方案] C --> D[评估结果] D --> E{结果满意?} E -->|否| F[反思失败原因] F --> G[生成改进方案] G --> C E -->|是| H[返回结果] style F fill:#fff3e0,stroke:#f57c00,stroke-width:3px style D fill:#e3f2fd,stroke:#1976d2,stroke-width:2px

与简单重试不同,Reflexion 会分析失败原因,并利用反思结果来改进下次尝试。

实现 Reflexion Agent

"""
Reflexion Agent - 自我反思与改进
"""
import json
from dataclasses import dataclass
from typing import Optional

from openai import OpenAI
@dataclass
class ReflectionResult:
    """Record of one failed attempt: its output, evaluation verdict, and reflection."""
    attempt: int      # 1-based attempt number within a single run()
    output: str       # the candidate solution generated for this attempt
    evaluation: dict  # evaluator JSON: expected keys "score", "passed", "feedback"
    reflection: str   # LLM analysis of why this attempt failed
    passed: bool      # stored attempts are failures, so this is always False in practice
class ReflexionAgent:
    """Agent that retries a task while reflecting on each failure.

    Flow per attempt: generate -> evaluate -> (on failure) reflect.
    Accumulated reflections are injected into the next generation prompt
    so the model can avoid repeating its mistakes.
    """

    def __init__(self, model: str = "gpt-4o", max_retries: int = 3):
        self.client = OpenAI()
        self.model = model
        self.max_retries = max_retries
        # History of failed attempts; fed back into later prompts.
        self.reflections: list[ReflectionResult] = []

    def run(self, task: str, eval_criteria: Optional[str] = None) -> str:
        """Execute *task* with automatic reflect-and-retry.

        Returns the first output that passes evaluation, or the last
        attempt's output once retries are exhausted.
        """
        output = ""  # defensive default so the final return is always bound
        for attempt in range(1, self.max_retries + 1):
            print(f"\n=== 尝试 {attempt}/{self.max_retries} ===")
            # 1. Generate a candidate (incorporating earlier reflections).
            output = self._generate(task, attempt)
            print(f"输出:\n{output[:200]}...")
            # 2. Evaluate the result.  Use .get(): the model's JSON may
            # omit keys despite the prompt, and a KeyError here would
            # abort the whole retry loop.
            evaluation = self._evaluate(task, output, eval_criteria)
            print(f"评估: 分数={evaluation.get('score', 0)}/10, 通过={evaluation.get('passed', False)}")
            if evaluation.get("passed", False):
                print("✅ 任务通过!")
                return output
            # 3. Reflect on why this attempt failed and remember it.
            reflection = self._reflect(task, output, evaluation)
            print(f"反思: {reflection[:100]}...")
            self.reflections.append(ReflectionResult(
                attempt=attempt,
                output=output,
                evaluation=evaluation,
                reflection=reflection,
                passed=False,
            ))
        # Every attempt failed: return the most recent output.
        print("⚠️ 达到最大重试次数")
        return output

    def _generate(self, task: str, attempt: int) -> str:
        """Generate a candidate solution, informed by prior reflections."""
        messages = [
            {"role": "system", "content": "你是一个优秀的问题解决者。请认真完成任务。"},
        ]
        # Inject the lessons learned from earlier failed attempts.
        if self.reflections:
            reflection_text = "\n\n".join(
                f"## 第 {r.attempt} 次尝试\n"
                f"失败原因: {r.evaluation.get('feedback', '')}\n"
                f"反思: {r.reflection}"
                for r in self.reflections
            )
            messages.append({
                "role": "system",
                "content": f"以下是之前的尝试和反思,请避免重复犯错:\n{reflection_text}",
            })
        messages.append({"role": "user", "content": task})
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.3,  # a little variety so retries differ
        )
        return response.choices[0].message.content

    def _evaluate(self, task: str, output: str, criteria: Optional[str] = None) -> dict:
        """Grade *output* against the task (and optional criteria), returning a JSON dict."""
        eval_prompt = f"""评估以下输出是否满足任务要求。
任务: {task}
{f'评估标准: {criteria}' if criteria else ''}
输出:
{output}
请以 JSON 格式返回:
{{"score": 1-10的分数, "passed": true/false (7分以上通过), "feedback": "具体反馈"}}"""
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "你是严格的质量评估专家。"},
                {"role": "user", "content": eval_prompt},
            ],
            temperature=0,  # deterministic grading
            response_format={"type": "json_object"},
        )
        try:
            return json.loads(response.choices[0].message.content)
        except json.JSONDecodeError:
            # Fall back to a failing verdict rather than crashing the loop.
            return {"score": 5, "passed": False, "feedback": "评估失败"}

    def _reflect(self, task: str, output: str, evaluation: dict) -> str:
        """Analyze why the attempt failed and produce concrete improvement advice."""
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {
                    "role": "system",
                    "content": """你是反思专家。分析失败原因并给出具体改进建议。
格式:
1. 根本原因: ...
2. 具体问题: ...
3. 改进建议: ...""",
                },
                {
                    "role": "user",
                    "content": f"任务: {task}\n\n输出: {output[:500]}\n\n评估反馈: {evaluation.get('feedback', '')}",
                },
            ],
            temperature=0,
        )
        return response.choices[0].message.content
# ==================
# Usage example
# ==================
agent = ReflexionAgent(max_retries=3)
# Task: write a "second largest" function; the evaluator grades against the criteria.
result = agent.run(
    task="编写一个 Python 函数,找出列表中的第二大的数。要求处理边界情况(空列表、重复元素等)。",
    eval_criteria="代码正确性、边界处理、代码质量",
)
print(f"\n=== 最终结果 ===\n{result}")

自动代码修复

Reflexion 特别适合代码生成和修复场景:

"""
自动代码修复 Agent
"""
class CodeFixAgent:
    """Test-driven repair agent: run tests, feed failures to the LLM, repeat."""

    def __init__(self, model: str = "gpt-4o"):
        self.client = OpenAI()
        self.model = model  # configurable, consistent with ReflexionAgent
        self.executor = SafeCodeExecutor()  # sandboxed runner from an earlier chapter

    def fix_code(self, code: str, test_cases: list[dict], max_attempts: int = 5) -> str:
        """Iteratively repair *code* until all *test_cases* pass.

        Each test case is a dict with "name" and "test_code" keys.
        Returns the fixed code, or the last candidate if *max_attempts*
        is exhausted.
        """
        current_code = code
        for attempt in range(1, max_attempts + 1):
            print(f"\n--- 修复尝试 {attempt} ---")
            # Run the suite against the current candidate.
            test_results = self._run_tests(current_code, test_cases)
            failed = [r for r in test_results if not r["passed"]]
            if not failed:
                print("✅ 所有测试通过!")
                return current_code
            print(f"❌ {len(failed)}/{len(test_results)} 个测试失败")
            for f in failed:
                print(f"  - {f['name']}: {f['error']}")
            # Ask the LLM for a repaired version, guided by the failures.
            current_code = self._fix(current_code, test_results)
        return current_code

    def _run_tests(self, code: str, test_cases: list[dict]) -> list[dict]:
        """Execute each test case against *code* in the sandbox and collect results."""
        results = []
        for tc in test_cases:
            # Append the assertion after the candidate code and run both together.
            full_code = f"{code}\n\n{tc['test_code']}"
            result = self.executor.execute(full_code)
            results.append({
                "name": tc["name"],
                # Passing means the sandbox ran cleanly AND no assertion fired.
                "passed": result["success"] and "AssertionError" not in result.get("error", ""),
                "output": result["output"],
                "error": result.get("error", ""),
            })
        return results

    def _fix(self, code: str, test_results: list[dict]) -> str:
        """Ask the LLM for a corrected version of *code* given the failing tests."""
        failed = [r for r in test_results if not r["passed"]]
        errors = "\n".join(
            f"测试 '{r['name']}' 失败: {r['error']}" for r in failed
        )
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "修复代码使所有测试通过。只返回修复后的完整代码。"},
                {"role": "user", "content": f"代码:\n{code}\n\n失败测试:\n{errors}"},
            ],
            temperature=0,
        )
        # Strip a markdown fence if the model wrapped its answer in one.
        content = response.choices[0].message.content
        if "```python" in content:
            content = content.split("```python")[1].split("```")[0]
        return content.strip()
# Usage
fixer = CodeFixAgent()
# Seed bug: sorted(nums)[-2] breaks on duplicates, short lists, and empty input.
# (The function body must be indented, otherwise every sandbox run fails with
# a SyntaxError instead of an assertion failure.)
buggy_code = """
def find_second_largest(nums):
    return sorted(nums)[-2]
"""
test_cases = [
    {"name": "基本测试", "test_code": "assert find_second_largest([1,2,3,4,5]) == 4"},
    {"name": "重复元素", "test_code": "assert find_second_largest([5,5,4,3]) == 4"},
    {"name": "两个元素", "test_code": "assert find_second_largest([1,2]) == 1"},
    {"name": "负数", "test_code": "assert find_second_largest([-1,-2,-3]) == -2"},
]
fixed_code = fixer.fix_code(buggy_code, test_cases)

反思模式的变体

graph TB A[反思模式] --> B[自我评估] A --> C[外部评估] A --> D[对比反思] B --> B1[LLM 评估自己的输出] C --> C1[运行测试/工具验证] D --> D1[与标准答案对比] style A fill:#e3f2fd,stroke:#1976d2,stroke-width:3px
模式 可靠性 适用场景
自我评估 开放性任务
外部评估 有客观标准的任务(代码、数学)
对比反思 有参考答案的任务

本章小结

下一章:学习 Agent 的安全性与可控性设计。