评估框架搭建实战
High Contrast
Dark Mode
Light Mode
Sepia
Forest
2 min read · 313 words

评估框架搭建实战

从零搭建 LLM 评估系统

本章通过一个完整案例,展示如何为企业级 LLM 应用搭建自动化评估框架——从数据集构建到指标计算,再到持续集成。

graph TB
    A[评估框架架构] --> B[数据层]
    A --> C[评估层]
    A --> D[报告层]
    A --> E[集成层]
    B --> B1[测试数据集管理]
    B --> B2[标注数据版本控制]
    B --> B3[动态数据生成]
    C --> C1[自动指标计算]
    C --> C2[LLM Judge 评估]
    C --> C3[安全检查]
    D --> D1[评估报告生成]
    D --> D2[历史趋势可视化]
    D --> D3[告警与通知]
    E --> E1[CI/CD 集成]
    E --> E2[模型注册表]
    E --> E3[A/B 测试平台]
    style A fill:#e3f2fd,stroke:#1976d2,stroke-width:3px
    style C fill:#c8e6c9,stroke:#388e3c,stroke-width:2px
    style E fill:#fff3e0,stroke:#f57c00,stroke-width:2px

Step 1:构建评估数据集

"""
评估数据集管理
"""
import json
import hashlib
from pathlib import Path
from dataclasses import dataclass, asdict
from datetime import datetime
@dataclass
class EvalSample:
    """A single evaluation sample.

    Attributes:
        id: Stable identifier; auto-derived from category + prompt when empty.
        category: Grouping label used for filtering and reporting.
        input_prompt: Prompt sent to the model under evaluation.
        expected_output: Expected answer text (optional).
        reference_answer: Gold/reference answer text (optional).
        metadata: Optional free-form extra fields; None when absent.
    """
    id: str
    category: str
    input_prompt: str
    expected_output: str = ""
    reference_answer: str = ""
    # Fix: was annotated as plain `dict` while defaulting to None; the
    # honest type is dict-or-None (string annotation keeps 3.9 compat).
    metadata: "dict | None" = None

    def __post_init__(self):
        if not self.id:
            # Derive a content-addressed ID so the same (category, prompt)
            # pair always maps to the same sample across runs.
            # md5 is acceptable here: this is an identifier, not a security token.
            content = f"{self.category}:{self.input_prompt}"
            self.id = hashlib.md5(content.encode()).hexdigest()[:8]
class EvalDataset:
    """Named, versioned collection of EvalSample items with JSON persistence."""

    def __init__(self, name: str, version: str = "1.0"):
        self.name = name
        self.version = version
        self.samples: list[EvalSample] = []
        self.created_at = datetime.now().isoformat()

    def add_sample(self, category: str, prompt: str, expected: str = "", reference: str = ""):
        """Append a sample (ID auto-generated); returns self for chaining."""
        sample = EvalSample(
            id="",
            category=category,
            input_prompt=prompt,
            expected_output=expected,
            reference_answer=reference,
        )
        self.samples.append(sample)
        return self

    def save(self, path: str):
        """Serialize the dataset to a UTF-8 JSON file at `path`."""
        data = {
            "name": self.name,
            "version": self.version,
            "created_at": self.created_at,
            "sample_count": len(self.samples),
            "samples": [asdict(s) for s in self.samples],
        }
        # Fix: explicit encoding — write_text defaults to the platform
        # locale, which can corrupt non-ASCII prompts on some systems.
        Path(path).write_text(
            json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8"
        )
        print(f"数据集已保存: {path} ({len(self.samples)} 条样本)")

    def load(self, path: str):
        """Load a dataset previously written by `save`; returns self."""
        data = json.loads(Path(path).read_text(encoding="utf-8"))
        self.name = data["name"]
        self.version = data["version"]
        # Fix: created_at was not restored from the file, so a loaded
        # dataset silently reported the load time as its creation time.
        self.created_at = data.get("created_at", self.created_at)
        self.samples = [EvalSample(**s) for s in data["samples"]]
        print(f"已加载: {self.name} v{self.version} ({len(self.samples)} 条样本)")
        return self

    def by_category(self, category: str) -> list[EvalSample]:
        """Return all samples whose category equals `category`."""
        return [s for s in self.samples if s.category == category]

    @property
    def categories(self) -> list[str]:
        """Distinct categories in first-seen order.

        Fix: the previous set() implementation returned a nondeterministic
        order; dict.fromkeys deduplicates while preserving insertion order.
        """
        return list(dict.fromkeys(s.category for s in self.samples))
# Example: assemble a small customer-support QA dataset via fluent chaining.
qa_dataset = (
    EvalDataset("customer-support-qa", version="2.0")
    .add_sample("退货政策", "你们的退货期限是多长?", expected="30天内可退货")
    .add_sample("退货政策", "退货需要什么条件?", expected="商品未拆封、附带原始发票")
    .add_sample("配送问题", "国际配送需要几天?", expected="7-14个工作日")
    .add_sample("账户安全", "如何修改密码?", expected="进入设置-安全-修改密码")
)
print(f"数据集: {qa_dataset.name}")
print(f"分类: {qa_dataset.categories}")
print(f"样本数: {len(qa_dataset.samples)}")

Step 2:自动化评估引擎

"""
自动化评估引擎
"""
from abc import ABC, abstractmethod
class BaseEvaluator(ABC):
    """Common interface that every scoring strategy implements."""

    @abstractmethod
    def evaluate(self, prediction: str, reference: str) -> float:
        """Score `prediction` against `reference` on a 0.0-1.0 scale."""
        ...
class ExactMatchEvaluator(BaseEvaluator):
    """Scores 1.0 only on an exact match (surrounding whitespace ignored)."""

    def evaluate(self, prediction: str, reference: str) -> float:
        # Strip outer whitespace on both sides; anything else is a miss.
        return float(prediction.strip() == reference.strip())
class ContainsKeyEvaluator(BaseEvaluator):
    """Scores the fraction of configured key phrases found in the prediction."""

    def __init__(self, key_phrases: list[str]):
        # Phrases checked with plain substring containment.
        self.key_phrases = key_phrases

    def evaluate(self, prediction: str, reference: str) -> float:
        # Note: `reference` is unused — the phrases themselves are the target.
        if not self.key_phrases:
            return 0.0
        hits = [phrase for phrase in self.key_phrases if phrase in prediction]
        return len(hits) / len(self.key_phrases)
class LengthRatioEvaluator(BaseEvaluator):
    """Scores how reasonable the prediction's length is versus the reference."""

    def __init__(self, min_ratio: float = 0.5, max_ratio: float = 2.0):
        # Acceptable band for len(prediction) / len(reference).
        self.min_ratio = min_ratio
        self.max_ratio = max_ratio

    def evaluate(self, prediction: str, reference: str) -> float:
        # No reference to compare against -> any length is acceptable.
        if not reference:
            return 1.0
        ratio = len(prediction) / len(reference)
        within_band = self.min_ratio <= ratio <= self.max_ratio
        # Outside the band the score decays linearly with distance from 1.0.
        return 1.0 if within_band else max(0.0, 1.0 - abs(ratio - 1.0) / 2)
class EvaluationEngine:
    """Runs a weighted ensemble of evaluators over predictions.

    Evaluators are registered under a name with a weight; each result dict
    contains every evaluator's score plus a weight-averaged "overall" score.
    """

    def __init__(self):
        # name -> (evaluator, weight)
        self.evaluators: dict[str, tuple[BaseEvaluator, float]] = {}

    def add_evaluator(self, name: str, evaluator: BaseEvaluator, weight: float = 1.0):
        """Register (or replace) an evaluator under `name`; returns self for chaining."""
        self.evaluators[name] = (evaluator, weight)
        return self

    def evaluate_single(self, prediction: str, reference: str) -> dict:
        """Score one prediction.

        Returns {evaluator_name: score, ..., "overall": weighted mean},
        with scores rounded to 4 decimals. "overall" is 0 when no
        evaluators are registered.
        """
        scores = {}
        weighted_sum = 0.0
        total_weight = 0.0
        for name, (evaluator, weight) in self.evaluators.items():
            score = evaluator.evaluate(prediction, reference)
            scores[name] = round(score, 4)
            weighted_sum += score * weight
            total_weight += weight
        scores["overall"] = round(weighted_sum / total_weight, 4) if total_weight else 0
        return scores

    def evaluate_batch(self, predictions: list[str], references: list[str]) -> dict:
        """Score a batch; returns per-sample scores plus mean/min/max per metric.

        Raises:
            ValueError: if predictions and references differ in length.
                (Previously zip() silently dropped the extra items.)
        """
        if len(predictions) != len(references):
            raise ValueError(
                f"predictions ({len(predictions)}) and references "
                f"({len(references)}) must have the same length"
            )
        all_scores = [
            self.evaluate_single(pred, ref)
            for pred, ref in zip(predictions, references)
        ]
        # Fix: an empty batch previously crashed with ZeroDivisionError
        # while computing the summary means.
        if not all_scores:
            return {"individual": [], "summary": {}}
        summary = {}
        for metric in list(self.evaluators) + ["overall"]:
            values = [s[metric] for s in all_scores]
            summary[metric] = {
                "mean": round(sum(values) / len(values), 4),
                "min": round(min(values), 4),
                "max": round(max(values), 4),
            }
        return {"individual": all_scores, "summary": summary}
# Example: a weighted ensemble of three evaluators.
engine = (
    EvaluationEngine()
    .add_evaluator("关键信息", ContainsKeyEvaluator(["30天", "退货"]), weight=0.5)
    .add_evaluator("长度合理", LengthRatioEvaluator(), weight=0.2)
    .add_evaluator("精确匹配", ExactMatchEvaluator(), weight=0.3)
)
result = engine.evaluate_single(
    prediction="我们的退货政策是30天内可退货,需保持商品完好。",
    reference="30天内可退货",
)
print("评估结果:", result)

Step 3:CI/CD 集成

graph LR
    A[代码提交] --> B[触发评估]
    B --> C{评估通过?}
    C -->|是| D[允许合并]
    C -->|否| E[阻止合并]
    E --> F[生成报告]
    F --> G[开发者修复]
    G --> A
    D --> H[部署金丝雀]
    H --> I[线上 A/B 测试]
    I --> J{指标达标?}
    J -->|是| K[全量发布]
    J -->|否| L[回滚]
    style C fill:#fff3e0,stroke:#f57c00,stroke-width:2px
    style J fill:#fff3e0,stroke:#f57c00,stroke-width:2px
    style K fill:#c8e6c9,stroke:#388e3c,stroke-width:2px
| 集成环节 | 触发条件 | 评估内容 | 阈值 |
| --- | --- | --- | --- |
| PR 检查 | 每次 PR | 核心指标回归 | 不低于主干 -2% |
| 日常回归 | 每日定时 | 全量测试集 | 绝对分数 >0.85 |
| 上线前 | 发布阶段 | 安全 + 性能 + 质量 | 全部绿灯 |
| 线上监控 | 持续 | 采样评估 | 异常告警 |

本章小结

下一章:查看 RAG 系统评估的完整实战案例。