高级 RAG 技术
基础 RAG 在很多场景下效果不够好。本章介绍几种前沿的 RAG 改进技术。
Self-RAG:自我反思 RAG
Self-RAG 让模型在生成过程中自我判断是否需要检索、检索结果是否相关、生成内容是否准确。
graph TB
A[用户问题] --> B{需要检索吗?}
B -->|不需要| C[直接生成]
B -->|需要| D[执行检索]
D --> E{检索结果相关吗?}
E -->|相关| F[基于结果生成]
E -->|不相关| G[丢弃结果]
G --> H[重新检索或直接生成]
F --> I{生成内容有依据吗?}
I -->|有| J[输出回答]
I -->|无| K[重新生成]
style B fill:#fff3e0,stroke:#f57c00,stroke-width:2px
style E fill:#fff3e0,stroke:#f57c00,stroke-width:2px
style I fill:#fff3e0,stroke:#f57c00,stroke-width:2px
style J fill:#c8e6c9,stroke:#388e3c,stroke-width:2px
Self-RAG 实现
"""
Self-RAG 实现
在检索和生成的每个步骤中加入自我反思
"""
from openai import OpenAI
class SelfRAG:
    """Self-RAG system: retrieval-augmented generation with self-reflection.

    At each stage the model judges: whether retrieval is needed at all,
    whether each retrieved document is relevant, and whether the generated
    answer is grounded in the retained documents.
    """

    def __init__(self, retriever):
        """Args:
            retriever: object exposing retrieve(query, top_k) -> list[dict];
                each dict is expected to carry a "content" key and may carry
                a "source" key.
        """
        self.client = OpenAI()
        self.retriever = retriever

    def answer(self, question: str) -> dict:
        """Run the full Self-RAG pipeline for one question.

        Returns:
            dict with keys "answer" (str), "method" (str), "sources"
            (list[str]) and — on the retrieval path — "grounded" (bool,
            describing the answer actually returned).
        """
        # Step 1: does this question need external knowledge at all?
        if not self._check_need_retrieval(question):
            # Simple factual / conversational questions: answer directly.
            answer = self._generate_direct(question)
            return {"answer": answer, "method": "direct", "sources": []}

        # Step 2: retrieve candidate documents.
        documents = self.retriever.retrieve(question, top_k=5)

        # Step 3: keep only documents the model judges relevant.
        relevant_docs = self._filter_relevant(question, documents)
        if not relevant_docs:
            return {
                "answer": "抱歉,根据现有知识库,我无法找到与您问题相关的信息。",
                "method": "no_relevant_docs",
                "sources": []
            }

        # Step 4: generate an answer from the surviving context.
        answer = self._generate_with_context(question, relevant_docs)

        # Step 5: verify the answer is supported by the documents.
        is_grounded = self._check_grounding(answer, relevant_docs)
        if not is_grounded:
            # Regenerate under a stricter prompt, then RE-verify so the
            # reported "grounded" flag describes the regenerated answer
            # (previously the stale pre-regeneration flag was returned).
            answer = self._generate_strict(question, relevant_docs)
            is_grounded = self._check_grounding(answer, relevant_docs)

        return {
            "answer": answer,
            "method": "self_rag",
            "sources": [d.get("source", "") for d in relevant_docs],
            "grounded": is_grounded
        }

    def _check_need_retrieval(self, question: str) -> bool:
        """Ask the model whether external material is needed to answer."""
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"""判断以下问题是否需要查阅外部资料才能准确回答。
回答 YES 或 NO。
问题: {question}
需要检索:"""
            }],
            temperature=0,
            max_tokens=5
        )
        return "YES" in response.choices[0].message.content.upper()

    def _filter_relevant(
        self, question: str, documents: list[dict]
    ) -> list[dict]:
        """Return the subset of documents the model judges relevant.

        Uses doc.get("content", "") so documents missing the key are
        gracefully judged on an empty snippet instead of raising KeyError
        (consistent with the .get() access used elsewhere in this class).
        """
        relevant = []
        for doc in documents:
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{
                    "role": "user",
                    "content": f"""这段资料是否与问题相关?回答 YES 或 NO。
问题: {question}
资料: {doc.get("content", "")[:500]}
相关:"""
                }],
                temperature=0,
                max_tokens=5
            )
            if "YES" in response.choices[0].message.content.upper():
                relevant.append(doc)
        return relevant

    def _check_grounding(
        self, answer: str, documents: list[dict]
    ) -> bool:
        """Check that the answer's claims are supported by the documents.

        NOTE: "GROUNDED" is a substring of "NOT_GROUNDED", so a negative
        verdict also contains it; the check therefore errs on the
        permissive side — acceptable here because a strict regeneration
        pass follows a negative verdict.
        """
        context = "\n".join([d["content"] for d in documents])
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"""检查回答中的信息是否都能在参考资料中找到依据。
回答 GROUNDED 或 NOT_GROUNDED。
参考资料: {context[:2000]}
回答: {answer}
判断:"""
            }],
            temperature=0,
            max_tokens=10
        )
        return "GROUNDED" in response.choices[0].message.content.upper()

    def _generate_with_context(
        self, question: str, documents: list[dict]
    ) -> str:
        """Generate an answer citing the numbered reference documents."""
        context = "\n\n".join([
            f"[{i+1}] {d['content']}" for i, d in enumerate(documents)
        ])
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"""根据参考资料回答问题,标注引用 [编号]。
参考资料:
{context}
问题: {question}
回答:"""
            }],
            temperature=0.3
        )
        return response.choices[0].message.content

    def _generate_direct(self, question: str) -> str:
        """Answer without retrieval (simple questions)."""
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": question}],
            temperature=0.3
        )
        return response.choices[0].message.content

    def _generate_strict(
        self, question: str, documents: list[dict]
    ) -> str:
        """Regenerate in strict mode: only facts present in the documents."""
        context = "\n\n".join([d["content"] for d in documents])
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"""严格只使用参考资料中的信息回答。
如果资料不足,请说"资料不足以完整回答"。
参考资料: {context}
问题: {question}
回答:"""
            }],
            temperature=0.1
        )
        return response.choices[0].message.content
GraphRAG:知识图谱增强
GraphRAG 将文档转换为知识图谱,利用实体和关系进行更精确的检索。
graph TB
subgraph 传统 RAG
A1[文档] --> A2[向量化]
A2 --> A3[向量检索]
end
subgraph GraphRAG
B1[文档] --> B2[抽取实体和关系]
B2 --> B3[构建知识图谱]
B3 --> B4[图遍历检索]
B4 --> B5[社区检测]
B5 --> B6[多层摘要]
end
A3 --> C[生成回答]
B6 --> C
style B3 fill:#fff3e0,stroke:#f57c00,stroke-width:3px
GraphRAG 简化实现
"""
GraphRAG 简化实现
使用 LLM 抽取实体和关系,构建知识图谱
"""
import json
from collections import defaultdict
class SimpleGraphRAG:
    """Simplified GraphRAG.

    Uses an LLM to extract entities and relations from documents, builds
    an in-memory knowledge graph, and answers queries by traversing the
    graph to collect documents attached to related entities.
    """

    def __init__(self):
        self.client = OpenAI()
        self.entities = {}                    # name -> {"type", "description"}
        self.relations = []                   # {"source","target","relation","doc_id"}
        self.entity_docs = defaultdict(list)  # entity name -> [{"doc_id","content"}]

    def add_document(self, doc_id: str, content: str) -> None:
        """Extract entities/relations from one document and index them.

        Malformed items in the LLM output (entities missing "name",
        relations missing an endpoint) are skipped instead of raising
        KeyError, so one bad extraction cannot abort ingestion.
        """
        extracted = self._extract_entities_and_relations(content)

        for entity in extracted.get("entities", []):
            name = entity.get("name")
            if not name:
                continue  # skip malformed entity records
            self.entities[name] = {
                "type": entity.get("type", "UNKNOWN"),
                "description": entity.get("description", "")
            }
            self.entity_docs[name].append({
                "doc_id": doc_id,
                "content": content
            })

        for relation in extracted.get("relations", []):
            source = relation.get("source")
            target = relation.get("target")
            if not source or not target:
                continue  # a relation without both endpoints is unusable
            self.relations.append({
                "source": source,
                "target": target,
                "relation": relation.get("relation", ""),
                "doc_id": doc_id
            })

    def _extract_entities_and_relations(self, text: str) -> dict:
        """Ask the LLM for a JSON object of entities and relations.

        Returns empty lists when the response is not valid JSON.
        """
        prompt = f"""从以下文本中抽取实体和关系。
文本:
{text[:2000]}
请以 JSON 格式返回:
{{
"entities": [
{{"name": "实体名", "type": "PERSON/ORG/TECH/CONCEPT", "description": "简短描述"}}
],
"relations": [
{{"source": "实体1", "target": "实体2", "relation": "关系描述"}}
]
}}
JSON:"""
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompt}],
            temperature=0,
            response_format={"type": "json_object"}
        )
        try:
            return json.loads(response.choices[0].message.content)
        except json.JSONDecodeError:
            return {"entities": [], "relations": []}

    def query(self, question: str, depth: int = 2) -> dict:
        """Knowledge-graph query: entity match -> traversal -> documents.

        Args:
            question: natural-language question.
            depth: number of relation hops to expand from matched entities.

        Returns:
            dict with "entities" (matched), "related_entities" (set after
            traversal) and "documents" (deduplicated by doc_id).
        """
        # Step 1: find known entities mentioned in the question.
        key_entities = self._extract_query_entities(question)

        # Step 2: expand over the graph to related entities.
        related = self._traverse_graph(key_entities, depth)

        # Step 3: collect the documents attached to those entities.
        relevant_docs = []
        for entity in related:
            if entity in self.entity_docs:
                relevant_docs.extend(self.entity_docs[entity])

        # Step 4: deduplicate by doc_id, preserving first-seen order.
        seen = set()
        unique_docs = []
        for doc in relevant_docs:
            if doc["doc_id"] not in seen:
                seen.add(doc["doc_id"])
                unique_docs.append(doc)

        return {
            "entities": key_entities,
            "related_entities": related,
            "documents": unique_docs
        }

    def _extract_query_entities(self, question: str) -> list[str]:
        """Find known entities by case-insensitive substring match."""
        found = []
        question_lower = question.lower()  # hoisted out of the loop
        for entity_name in self.entities:
            if entity_name.lower() in question_lower:
                found.append(entity_name)
        return found

    def _traverse_graph(
        self, start_entities: list[str], depth: int
    ) -> set:
        """Breadth-first expansion up to `depth` hops over the relations.

        Stops early once a hop discovers no new entities (the original
        kept iterating over an empty frontier for the remaining hops).
        """
        visited = set(start_entities)
        frontier = set(start_entities)
        for _ in range(depth):
            next_level = set()
            for entity in frontier:
                # Relations are undirected for traversal purposes.
                for rel in self.relations:
                    if rel["source"] == entity:
                        next_level.add(rel["target"])
                    elif rel["target"] == entity:
                        next_level.add(rel["source"])
            next_level -= visited
            if not next_level:
                break  # frontier exhausted; no point looping further
            visited |= next_level
            frontier = next_level
        return visited
Adaptive RAG:自适应 RAG
根据问题的复杂度动态选择检索策略:
graph TB
A[用户问题] --> B{问题分类}
B -->|简单事实| C[单次检索]
B -->|比较分析| D[多次检索]
B -->|综合总结| E[递归检索]
B -->|不需检索| F[直接回答]
C --> G[生成回答]
D --> G
E --> G
F --> G
style B fill:#fff3e0,stroke:#f57c00,stroke-width:3px
"""
Adaptive RAG 实现
根据问题复杂度自动选择最佳策略
"""
class AdaptiveRAG:
    """Adaptive RAG: picks a retrieval strategy per question category."""

    def __init__(self, retriever):
        """Args:
            retriever: object exposing retrieve(query, top_k) -> list[dict].
        """
        self.client = OpenAI()
        self.retriever = retriever

    def answer(self, question: str) -> dict:
        """Classify the question, then dispatch to the matching strategy.

        Returns a dict with at least "answer", "strategy" and "retrievals".
        """
        category = self._classify_question(question)
        # Dispatch table; unknown categories fall back to single retrieval.
        strategies = {
            "factual": self._handle_factual,
            "analytical": self._handle_analytical,
            "summary": self._handle_summary,
            "direct": self._handle_direct,
        }
        handler = strategies.get(category, self._handle_factual)
        return handler(question)

    def _classify_question(self, question: str) -> str:
        """Classify question complexity into one of four categories.

        The model occasionally echoes extra words ("分类: factual"), so we
        accept any response CONTAINING a valid label instead of requiring
        exact equality; otherwise fall back to "factual".
        """
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"""将以下问题分类为以下之一:
- factual: 需要查找具体事实
- analytical: 需要对比分析多个信息
- summary: 需要综合总结大量信息
- direct: 无需检索即可回答
问题: {question}
分类:"""
            }],
            temperature=0,
            max_tokens=10
        )
        result = response.choices[0].message.content.strip().lower()
        valid = ("factual", "analytical", "summary", "direct")
        return next((c for c in valid if c in result), "factual")

    def _handle_factual(self, question: str) -> dict:
        """Simple fact lookup: one retrieval pass."""
        docs = self.retriever.retrieve(question, top_k=3)
        answer = self._generate(question, docs)
        return {"answer": answer, "strategy": "factual", "retrievals": 1}

    def _handle_analytical(self, question: str) -> dict:
        """Analytical question: decompose, retrieve per sub-question, merge."""
        sub_questions = self._decompose(question)
        all_docs = []
        for sq in sub_questions:
            all_docs.extend(self.retriever.retrieve(sq, top_k=3))
        unique_docs = self._dedupe(all_docs)
        answer = self._generate(question, unique_docs)
        return {
            "answer": answer,
            "strategy": "analytical",
            "sub_questions": sub_questions,
            "retrievals": len(sub_questions)
        }

    def _dedupe(self, docs: list[dict]) -> list[dict]:
        """Deduplicate docs by their first 100 content characters.

        Order-preserving; first occurrence wins. A 100-char prefix is a
        cheap near-duplicate heuristic, not an exact-content comparison.
        """
        seen: set[str] = set()
        unique: list[dict] = []
        for d in docs:
            key = d.get("content", "")[:100]
            if key not in seen:
                seen.add(key)
                unique.append(d)
        return unique

    def _handle_summary(self, question: str) -> dict:
        """Summary question: wide retrieval, then hierarchical summarization."""
        docs = self.retriever.retrieve(question, top_k=10)
        # Summarize in small batches, then synthesize from the summaries.
        summaries = []
        batch_size = 3
        for i in range(0, len(docs), batch_size):
            batch = docs[i:i + batch_size]
            summaries.append({"content": self._summarize_batch(question, batch)})
        answer = self._generate(question, summaries)
        return {"answer": answer, "strategy": "summary", "retrievals": 1}

    def _handle_direct(self, question: str) -> dict:
        """No retrieval needed: answer straight from the model."""
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": question}],
            temperature=0.3
        )
        return {
            "answer": response.choices[0].message.content,
            "strategy": "direct",
            "retrievals": 0
        }

    def _decompose(self, question: str) -> list[str]:
        """Split a complex question into 2-3 sub-questions (one per line).

        Leading list markers the model tends to prepend ("1.", "-", "*")
        are stripped so the sub-questions make clean retrieval queries.
        """
        import re  # local import keeps this snippet self-contained

        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"将以下问题分解为2-3个子问题,每行一个:\n{question}"
            }],
            temperature=0.3
        )
        lines = response.choices[0].message.content.strip().split("\n")
        cleaned = (re.sub(r"^\s*(?:\d+[.、)]|[-*•])\s*", "", l).strip() for l in lines)
        return [c for c in cleaned if c]

    def _summarize_batch(
        self, question: str, docs: list[dict]
    ) -> str:
        """Summarize one batch of documents with respect to the question."""
        context = "\n".join([d.get("content", "") for d in docs])
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"针对问题「{question}」总结以下资料的关键信息:\n{context}"
            }],
            temperature=0.3
        )
        return response.choices[0].message.content

    def _generate(self, question: str, docs: list[dict]) -> str:
        """Generate the final answer from numbered reference documents."""
        context = "\n\n".join([
            f"[{i+1}] {d.get('content', '')}"
            for i, d in enumerate(docs)
        ])
        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{
                "role": "user",
                "content": f"根据参考资料回答:\n\n{context}\n\n问题: {question}"
            }],
            temperature=0.3
        )
        return response.choices[0].message.content
技术对比
| 技术 | 核心思想 | 适用场景 | 复杂度 |
|---|---|---|---|
| Naive RAG | 检索 + 生成 | 简单问答 | 低 |
| Advanced RAG | 混合检索 + 重排序 | 通用场景 | 中 |
| Self-RAG | 自我反思判断 | 高精度需求 | 中高 |
| GraphRAG | 知识图谱增强 | 复杂关系推理 | 高 |
| Adaptive RAG | 动态策略选择 | 多样化问题 | 中高 |
本章小结
- Self-RAG 通过自我反思提升回答的准确性和可信度
- GraphRAG 利用知识图谱进行关系推理,适合复杂查询
- Adaptive RAG 根据问题复杂度自动选择最佳策略
- 选择技术时要在效果、延迟、成本之间权衡
下一章:我们将学习 RAG 系统的评估方法。