企业级评估体系构建
High Contrast
Dark Mode
Light Mode
Sepia
Forest
2 min read · 331 words

企业级评估体系构建

从个人项目的"跑一次评估脚本"到企业级"持续评估平台",需要在组织、流程、工具和数据四个层面系统建设。

企业评估平台架构

```mermaid
graph TB
    A[企业评估平台] --> B[数据层<br/>Data Layer]
    A --> C[计算层<br/>Compute Layer]
    A --> D[分析层<br/>Analytics Layer]
    A --> E[集成层<br/>Integration Layer]
    B --> B1[评估数据集仓库<br/>黄金答案库<br/>版本管理]
    C --> C1[评估任务调度<br/>并发运行控制<br/>结果存储]
    D --> D1[指标看板<br/>趋势分析<br/>告警规则]
    E --> E1[CI/CD接入<br/>模型注册表<br/>实验追踪]
    style A fill:#ede7f6,stroke:#5e35b1,stroke-width:2px
    style D fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
    style E fill:#c8e6c9,stroke:#43a047,stroke-width:2px
```

评估平台核心组件

from dataclasses import dataclass, field
from enum import Enum
from datetime import datetime
from typing import Callable
import json
import hashlib
class EvalJobStatus(Enum):
    """Lifecycle states of an evaluation job."""

    PENDING = "pending"      # submitted, not yet started
    RUNNING = "running"      # currently executing
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # aborted with an error
@dataclass
class ModelVersion:
    """A registered, deployable model version.

    ``full_id`` is the canonical "<model_id>:<version>" key under which
    the version is stored in the platform's model registry.
    """

    model_id: str      # logical model name, e.g. "cs-bot"
    version: str       # version label, e.g. "v2.1"
    endpoint: str      # inference endpoint URL
    parameters: dict   # generation params (temperature, max_tokens, ...)
    deployed_at: datetime
    tags: list[str] = field(default_factory=list)  # free-form labels

    @property
    def full_id(self) -> str:
        """Canonical registry key combining model id and version."""
        return ":".join((self.model_id, self.version))
@dataclass
class EvalJob:
    """One evaluation run of a model version against a named dataset."""

    job_id: str                  # unique id assigned by the platform
    model_version: ModelVersion  # model under evaluation
    dataset_name: str            # key into the platform's dataset registry
    metrics: list[str]           # metric names to compute
    created_at: datetime = field(default_factory=datetime.now)
    status: EvalJobStatus = EvalJobStatus.PENDING
    results: dict = field(default_factory=dict)  # metric name -> score
    error_message: str = ""                      # populated on failure

    @property
    def is_terminal(self) -> bool:
        """True once the job can no longer change state."""
        return self.status in {EvalJobStatus.COMPLETED, EvalJobStatus.FAILED}
class EvalPlatform:
    """企业级评估平台.

    Central registry for model versions and evaluation datasets, plus
    job submission/execution with quality-gate checking.  Type names
    defined elsewhere in this module are quoted as forward references
    so the class does not depend on definition order at import time.
    """

    def __init__(self, platform_name: str):
        self.name = platform_name
        # full_id ("model:version") -> ModelVersion
        self.model_registry: dict[str, "ModelVersion"] = {}
        # dataset name -> list of sample dicts
        self.dataset_registry: dict[str, list[dict]] = {}
        self.jobs: list["EvalJob"] = []
        self.quality_gates: dict[str, float] = {}   # metric -> min_threshold

    def register_model(self, model_version: "ModelVersion") -> None:
        """注册模型版本 — store a model version under its full_id."""
        self.model_registry[model_version.full_id] = model_version
        print(f"[Registry] 注册模型: {model_version.full_id}")

    def register_dataset(self, name: str, samples: list[dict]) -> None:
        """注册评估数据集 — register an evaluation dataset.

        The printed fingerprint uses ``sort_keys=True`` so that datasets
        with identical content always hash identically, regardless of
        dict key order.  (md5 here is a content fingerprint only, not a
        security measure.)
        """
        self.dataset_registry[name] = samples
        checksum = hashlib.md5(
            json.dumps(samples, ensure_ascii=False, sort_keys=True).encode()
        ).hexdigest()[:8]
        print(f"[Dataset] 注册数据集 '{name}': {len(samples)} 条 (md5:{checksum})")

    def set_quality_gate(self, metric: str, min_threshold: float) -> None:
        """设置质量门禁阈值 — set the minimum acceptable score for a metric."""
        self.quality_gates[metric] = min_threshold
        print(f"[Gate] {metric} 门禁阈值设为 {min_threshold}")

    def submit_job(
        self,
        model_full_id: str,
        dataset_name: str,
        metrics: list[str],
    ) -> "EvalJob":
        """提交评估任务 — create a pending job for a registered model/dataset.

        Raises:
            ValueError: if the model or dataset has not been registered.
        """
        model = self.model_registry.get(model_full_id)
        if not model:
            raise ValueError(f"未注册的模型: {model_full_id}")
        if dataset_name not in self.dataset_registry:
            raise ValueError(f"未注册的数据集: {dataset_name}")
        # Sequential ids; assumes jobs are only ever appended.
        job_id = f"job-{len(self.jobs)+1:04d}"
        job = EvalJob(
            job_id=job_id,
            model_version=model,
            dataset_name=dataset_name,
            metrics=metrics,
        )
        self.jobs.append(job)
        print(f"[Job] 提交评估任务: {job_id} ({model_full_id} × {dataset_name})")
        return job

    def run_job(
        self,
        job: "EvalJob",
        eval_fn: "Callable[[list[dict], ModelVersion], dict[str, float]]",
    ) -> dict:
        """运行评估任务并执行质量门禁检查.

        ``eval_fn`` receives (dataset_samples, model_version) and returns
        a metric-name -> score mapping.  Any exception marks the job
        FAILED and is reported in the returned dict rather than raised.
        """
        job.status = EvalJobStatus.RUNNING
        try:
            dataset = self.dataset_registry[job.dataset_name]
            scores = eval_fn(dataset, job.model_version)
            job.results = scores
            job.status = EvalJobStatus.COMPLETED
            # 质量门禁检查
            gate_results = self._check_quality_gates(scores)
            return {
                "job_id": job.job_id,
                "status": "completed",
                "scores": scores,
                "quality_gate": gate_results,
                "passed_gates": gate_results["passed"],
            }
        except Exception as e:  # boundary: report failure instead of raising
            job.status = EvalJobStatus.FAILED
            job.error_message = str(e)
            return {"job_id": job.job_id, "status": "failed", "error": str(e)}

    def _check_quality_gates(self, scores: dict[str, float]) -> dict:
        """对照质量门禁阈值检查 — compare scores against gate thresholds.

        A gate whose metric is absent from ``scores`` cannot be checked;
        it is surfaced under ``"missing"`` instead of silently passing.
        ``"passed"`` still reflects only the gates that were actually
        checked, so existing callers see unchanged semantics.
        """
        failures = []
        missing = []
        for metric, min_val in self.quality_gates.items():
            actual = scores.get(metric)
            if actual is None:
                # Gate metric was never evaluated — make that visible.
                missing.append(metric)
            elif actual < min_val:
                failures.append({
                    "metric": metric,
                    "required": min_val,
                    "actual": round(actual, 4),
                    "gap": round(min_val - actual, 4),
                })
        return {
            "passed": len(failures) == 0,
            "failures": failures,
            "missing": missing,
            "checked_gates": len(self.quality_gates),
        }

    def comparison_report(self, job_id_a: str, job_id_b: str) -> dict:
        """对比两个评估任务的结果 — metric-by-metric diff of two jobs.

        Metrics missing from one side default to 0; deltas within ±0.01
        are treated as noise (neither improved nor regressed).
        """
        def find_job(jid: str) -> "EvalJob | None":
            # Linear scan is fine for the expected number of jobs.
            return next((j for j in self.jobs if j.job_id == jid), None)

        job_a = find_job(job_id_a)
        job_b = find_job(job_id_b)
        if not job_a or not job_b:
            return {"error": "找不到指定任务"}
        comparison = {}
        all_metrics = set(job_a.results) | set(job_b.results)
        for metric in all_metrics:
            a_val = job_a.results.get(metric, 0)
            b_val = job_b.results.get(metric, 0)
            delta = round(b_val - a_val, 4)
            comparison[metric] = {
                "before": a_val,
                "after": b_val,
                "delta": delta,
                "improved": delta > 0.01,
                "regressed": delta < -0.01,
            }
        return {
            "version_a": job_a.model_version.full_id,
            "version_b": job_b.model_version.full_id,
            "metrics_comparison": comparison,
        }
# Usage example
platform = EvalPlatform("企业 LLM 评估平台")

# Register one production model version
cs_bot_v21 = ModelVersion(
    model_id="cs-bot",
    version="v2.1",
    endpoint="https://api.internal.example.com/v2.1/completions",
    parameters={"temperature": 0.1, "max_tokens": 512},
    deployed_at=datetime.now(),
    tags=["production", "customer-service"],
)
platform.register_model(cs_bot_v21)

# Configure quality-gate thresholds
for gate_metric, threshold in [
    ("accuracy", 0.80),
    ("safety", 0.95),
    ("instruction_following", 0.85),
]:
    platform.set_quality_gate(gate_metric, threshold)

评估团队的职责矩阵

| 角色 | 职责 | 评估相关任务 |
| --- | --- | --- |
| ML Engineer | 模型开发/微调 | 提交评估任务,分析指标异常 |
| Eval Engineer | 评估平台维护 | 更新数据集,优化评估流程 |
| Red Team | 安全测试 | 设计对抗样本,报告安全漏洞 |
| 产品经理 | 业务指标定义 | 定义质量门禁阈值 |
| 数据标注 | 黄金答案维护 | 定期标注新样本 |

本章小结

下一章:评估最佳实践总结