Cost Optimization and Caching Strategies

The operating cost of a RAG system comes mainly from the Embedding API, the LLM API, and the vector database. An unoptimized RAG system can cost 5-10x more than an optimized one.

RAG Cost Breakdown

```mermaid
graph TB
    A[RAG operating cost] --> B[Embedding cost]
    A --> C[LLM generation cost]
    A --> D[Vector database cost]
    A --> E[Infrastructure cost]
    B --> B1[Document indexing: one-time]
    B --> B2[Query embedding: per query]
    C --> C1[Context tokens: retrieved documents]
    C --> C2[Generation tokens: the answer]
    D --> D1[Storage: vector volume]
    D --> D2[Queries: QPS]
    style A fill:#ffebee,stroke:#c62828,stroke-width:3px
    style C fill:#fff3e0,stroke:#f57c00,stroke-width:2px
```
| Cost Item | Typical Share | Priority |
|---|---|---|
| LLM generation tokens | 50-70% | P0 |
| Embedding API | 10-20% | P1 |
| Vector database | 10-20% | P2 |
| Infrastructure | 5-10% | P3 |
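
To make these shares concrete, here is a back-of-the-envelope monthly cost model. All prices and traffic numbers below are illustrative assumptions, not quotes from any provider:

```python
# A back-of-the-envelope monthly cost model (all numbers are illustrative assumptions).
def monthly_cost(
    queries_per_day: int = 10_000,
    context_tokens: int = 4_000,       # retrieved context per query
    output_tokens: int = 500,          # generated answer per query
    llm_price_per_1k: float = 0.0025,  # assumed blended LLM price (USD / 1K tokens)
    embed_price_per_1k: float = 0.0001,
    query_tokens: int = 50,            # tokens embedded per query
    vector_db_monthly: float = 300.0,  # assumed flat vector DB bill
) -> dict:
    days = 30
    llm = queries_per_day * days * (context_tokens + output_tokens) * llm_price_per_1k / 1000
    embed = queries_per_day * days * query_tokens * embed_price_per_1k / 1000
    total = llm + embed + vector_db_monthly
    return {"llm": llm, "embedding": embed, "vector_db": vector_db_monthly,
            "total": total, "llm_share": llm / total}

print(monthly_cost())  # with these assumptions, the LLM line item is ~90% of the bill
```

Even with generous assumptions for the other line items, LLM generation tokens dominate, which is why the optimizations below target them first.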

Semantic Caching

Semantic caching is the first lever for RAG cost optimization: similar queries return cached results directly, skipping both retrieval and generation.

"""
语义缓存实现
"""
from dataclasses import dataclass, field
import hashlib
import time
@dataclass
class CacheEntry:
"""缓存条目"""
query: str
query_embedding: list[float]
answer: str
sources: list[dict]
created_at: float = field(default_factory=time.time)
ttl_seconds: int = 3600
hit_count: int = 0
@property
def is_expired(self) -> bool:
return time.time() - self.created_at > self.ttl_seconds
class SemanticCache:
"""语义缓存"""
def __init__(self, embed_client, similarity_threshold: float = 0.92):
self.embedder = embed_client
self.threshold = similarity_threshold
self.cache: list[CacheEntry] = []
def get(self, query: str) -> CacheEntry | None:
"""语义匹配查找缓存"""
query_embedding = self.embedder.embed(query)
best_match = None
best_similarity = 0.0
for entry in self.cache:
if entry.is_expired:
continue
similarity = self._cosine_similarity(query_embedding, entry.query_embedding)
if similarity > self.threshold and similarity > best_similarity:
best_similarity = similarity
best_match = entry
if best_match:
best_match.hit_count += 1
print(f"  缓存命中 (相似度: {best_similarity:.3f})")
return best_match
return None
def put(self, query: str, answer: str, sources: list[dict], ttl: int = 3600) -> None:
"""写入缓存"""
query_embedding = self.embedder.embed(query)
entry = CacheEntry(
query=query,
query_embedding=query_embedding,
answer=answer,
sources=sources,
ttl_seconds=ttl,
)
self.cache.append(entry)
# 清理过期条目
self.cache = [e for e in self.cache if not e.is_expired]
def _cosine_similarity(self, a: list[float], b: list[float]) -> float:
"""余弦相似度"""
dot_product = sum(x * y for x, y in zip(a, b))
norm_a = sum(x * x for x in a) ** 0.5
norm_b = sum(x * x for x in b) ** 0.5
if norm_a == 0 or norm_b == 0:
return 0.0
return dot_product / (norm_a * norm_b)
def get_stats(self) -> dict:
"""缓存统计"""
active = [e for e in self.cache if not e.is_expired]
return {
"total_entries": len(active),
"total_hits": sum(e.hit_count for e in active),
"avg_hits_per_entry": (
sum(e.hit_count for e in active) / len(active) if active else 0
),
}
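
A minimal usage sketch wrapping the cache around a RAG pipeline; `embed_client` and `run_rag_pipeline` are hypothetical stand-ins for your own embedding client and retrieval-plus-generation step:

```python
# Hypothetical wiring: embed_client and run_rag_pipeline are placeholders,
# not part of any specific library.
cache = SemanticCache(embed_client, similarity_threshold=0.92)

def answer_with_cache(query: str) -> str:
    if (hit := cache.get(query)) is not None:
        return hit.answer                      # skip retrieval and generation entirely
    answer, sources = run_rag_pipeline(query)  # full RAG path on a miss
    cache.put(query, answer, sources, ttl=3600)
    return answer
```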

Token Optimization Strategies

```mermaid
graph TB
    A[Token optimization] --> B[Context compression]
    A --> C[Dynamic Top-K]
    A --> D[Summaries instead of full text]
    A --> E[Small-model routing]
    B --> B1[LLMLingua compression<br/>keeps the key information]
    C --> C1[Simple questions retrieve less<br/>complex questions retrieve more]
    D --> D1[Summarize long documents first<br/>to cut token counts]
    E --> E1[Small models for simple questions<br/>large models for complex ones]
    style A fill:#e3f2fd,stroke:#1976d2,stroke-width:3px
```
"""
Token 优化策略
"""
from dataclasses import dataclass
@dataclass
class ModelTier:
"""模型层级"""
name: str
model_id: str
cost_per_1k_tokens: float
max_context: int
class CostAwareRouter:
"""成本感知的模型路由器"""
TIERS = [
ModelTier("small", "gpt-4o-mini", 0.00015, 128000),
ModelTier("medium", "gpt-4o", 0.0025, 128000),
ModelTier("large", "claude-3.5-sonnet", 0.003, 200000),
]
def __init__(self, llm_clients: dict):
self.clients = llm_clients
def route(self, query: str, context_tokens: int) -> ModelTier:
"""根据查询复杂度选择模型"""
complexity = self._estimate_complexity(query, context_tokens)
if complexity == "simple":
return self.TIERS[0]  # 小模型
elif complexity == "medium":
return self.TIERS[1]  # 中等模型
else:
return self.TIERS[2]  # 大模型
def _estimate_complexity(self, query: str, context_tokens: int) -> str:
"""估算查询复杂度"""
word_count = len(query.split())
has_reasoning = any(kw in query for kw in ["为什么", "分析", "比较", "评估", "总结"])
has_multi_hop = "和" in query and "?" in query or "?" in query
if has_multi_hop or (has_reasoning and context_tokens > 3000):
return "complex"
elif has_reasoning or word_count > 20:
return "medium"
return "simple"
def estimate_cost(self, tier: ModelTier, input_tokens: int, output_tokens: int) -> float:
"""估算单次查询成本(美元)"""
return (input_tokens + output_tokens) * tier.cost_per_1k_tokens / 1000
class ContextCompressor:
"""上下文压缩器"""
def compress(self, documents: list[dict], query: str, max_tokens: int = 3000) -> str:
"""压缩检索文档到指定 Token 数内"""
total_text = ""
for doc in documents:
content = doc.get("content", "")
# 提取与查询最相关的段落
paragraphs = content.split("\n\n")
relevant = [p for p in paragraphs if self._is_relevant(p, query)]
if relevant:
total_text += "\n".join(relevant[:2]) + "\n\n"
# 粗略估算 Token 数(中文约1字=1.5 token)
estimated_tokens = len(total_text) * 1.5
if estimated_tokens > max_tokens:
break
return total_text.strip()
def _is_relevant(self, paragraph: str, query: str) -> bool:
"""判断段落是否与查询相关(简化版)"""
if len(paragraph.strip()) < 20:
return False
query_words = set(query.lower().split())
para_words = set(paragraph.lower().split())
overlap = query_words & para_words
return len(overlap) >= 1
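
A short sketch of how the router and compressor might be used together; the document, query, and token counts below are made up for illustration:

```python
router = CostAwareRouter(llm_clients={})  # clients omitted in this sketch
compressor = ContextCompressor()

docs = [{"content": "RAG 系统的成本主要来自 LLM Token。\n\n缓存可以显著降低成本。"}]
query = "为什么 RAG 系统的成本这么高?"

context = compressor.compress(docs, query, max_tokens=3000)
tier = router.route(query, context_tokens=int(len(context) * 1.5))
cost = router.estimate_cost(tier, input_tokens=2000, output_tokens=500)
print(tier.name, f"${cost:.4f}")  # "为什么" triggers the reasoning heuristic: medium $0.0063
```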

Cost Optimization Comparison

| Strategy | Cost Reduction | Latency Impact | Quality Impact |
|---|---|---|---|
| Semantic cache | 30-50% | Lower (on hit) | |
| Small-model routing | 20-40% | Lower | None for simple questions |
| Context compression | 15-30% | Slightly higher | May drop details |
| Dynamic Top-K | 10-20% | Lower | Needs adaptive tuning |
| Batch processing | 10-15% | Higher | |
| Embedding cache | 5-10% | Lower | |
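
Dynamic Top-K is the one strategy in the table not covered by the code above. A minimal sketch, with thresholds that are assumptions to be tuned against your own traffic:

```python
# A minimal dynamic Top-K sketch: retrieve fewer chunks for simple queries,
# more for complex ones. All thresholds are illustrative assumptions.
def dynamic_top_k(query: str, base_k: int = 5) -> int:
    reasoning_keywords = ["为什么", "分析", "比较", "评估", "总结"]
    if any(kw in query for kw in reasoning_keywords):
        return base_k * 2               # complex question: cast a wider net
    if len(query) < 15:
        return max(2, base_k // 2)      # short lookup-style query: retrieve less
    return base_k

# results = vector_store.search(query, top_k=dynamic_top_k(query))  # hypothetical store
```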

Chapter Summary

| Topic | Key Point |
|---|---|
| Cost breakdown | LLM tokens are the largest share (50-70%) |
| Semantic caching | Answer similar queries straight from cache, saving 30-50% |
| Model routing | Small models for simple questions, large models for complex ones |
| Context compression | Fewer input tokens keeps cost under control |
| Optimization order | Caching → routing → compression → dynamic Top-K |
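
Putting the levers together in one request path (a sketch: `retrieve` and `run_llm` are hypothetical stand-ins, while `cache`, `compressor`, `router`, and `dynamic_top_k` come from the earlier snippets):

```python
# How the layers compose at request time. retrieve() and run_llm() are
# hypothetical stand-ins for your retrieval and generation calls.
def optimized_answer(query: str) -> str:
    # 1. Caching: the cheapest exit, so check it first
    if (hit := cache.get(query)) is not None:
        return hit.answer
    # 2. Dynamic Top-K retrieval
    docs = retrieve(query, top_k=dynamic_top_k(query))
    # 3. Context compression to stay within the token budget
    context = compressor.compress(docs, query, max_tokens=3000)
    # 4. Routing: pick the cheapest adequate model for this query
    tier = router.route(query, context_tokens=int(len(context) * 1.5))
    answer = run_llm(tier.model_id, context, query)
    cache.put(query, answer, sources=docs)
    return answer
```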

Next chapter: Enterprise Knowledge Base in Practice