评估指标详解
High Contrast
Dark Mode
Light Mode
Sepia
Forest
2 min read · 307 words

评估指标详解

本章深入讲解 LLM 评估的核心指标,从传统 NLP 指标到 LLM 专用指标。

指标分类

graph TB
    A[评估指标] --> B[文本匹配指标]
    A --> C[语义指标]
    A --> D[任务指标]
    B --> B1[BLEU]
    B --> B2[ROUGE]
    B --> B3[Exact Match]
    C --> C1[BERTScore]
    C --> C2[语义相似度]
    D --> D1[Pass@K 代码]
    D --> D2[F1 分类]
    D --> D3[忠实度 RAG]
    style A fill:#e3f2fd,stroke:#1976d2,stroke-width:3px

BLEU 和 ROUGE

"""
经典 NLP 评估指标实现
"""
from collections import Counter
import math
def calc_bleu(reference: str, hypothesis: str, n: int = 4) -> float:
    """Compute a simplified BLEU score.

    Measures n-gram overlap between the model output and a single
    reference: the geometric mean of clipped 1..n-gram precisions,
    scaled by a brevity penalty for short hypotheses.

    Args:
        reference: Ground-truth text (whitespace-tokenized).
        hypothesis: Model output (whitespace-tokenized).
        n: Maximum n-gram order.

    Returns:
        BLEU in [0, 1]; 0.0 if the hypothesis is empty or any
        n-gram precision is zero.
    """
    ref_words = reference.split()
    hyp_words = hypothesis.split()
    if not hyp_words:
        return 0.0

    def ngram_counts(words: list[str], size: int) -> Counter:
        # Multiset of all n-grams of the given size.
        return Counter(
            tuple(words[k:k + size]) for k in range(len(words) - size + 1)
        )

    log_precisions = []
    for order in range(1, n + 1):
        ref_counts = ngram_counts(ref_words, order)
        hyp_counts = ngram_counts(hyp_words, order)
        # Clip each hypothesis n-gram count by its reference count.
        matched = sum(min(cnt, ref_counts[gram]) for gram, cnt in hyp_counts.items())
        total = sum(hyp_counts.values())
        precision = matched / total if total > 0 else 0
        if precision == 0:
            # Any zero precision makes the geometric mean zero.
            return 0.0
        log_precisions.append(math.log(precision))

    geo_mean = math.exp(sum(log_precisions) / len(log_precisions))

    # Brevity penalty: punish hypotheses shorter than the reference.
    penalty = 1.0
    if len(hyp_words) < len(ref_words):
        penalty = math.exp(1 - len(ref_words) / len(hyp_words))
    return penalty * geo_mean
def calc_rouge_l(reference: str, hypothesis: str) -> dict:
    """Compute ROUGE-L precision, recall and F1.

    Based on the length of the longest common subsequence (LCS)
    between the whitespace-tokenized reference and hypothesis.

    Returns:
        Dict with keys "precision", "recall" and "f1".
    """
    ref_words = reference.split()
    hyp_words = hypothesis.split()
    rows, cols = len(ref_words), len(hyp_words)

    # Classic O(m*n) dynamic-programming table for LCS length.
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for r in range(rows):
        for c in range(cols):
            if ref_words[r] == hyp_words[c]:
                table[r + 1][c + 1] = table[r][c] + 1
            else:
                table[r + 1][c + 1] = max(table[r][c + 1], table[r + 1][c])
    lcs_length = table[rows][cols]

    prec = lcs_length / cols if cols > 0 else 0
    rec = lcs_length / rows if rows > 0 else 0
    f1 = 2 * prec * rec / (prec + rec) if (prec + rec) > 0 else 0
    return {"precision": prec, "recall": rec, "f1": f1}
# Example: compare a model answer against a reference with both metrics.
ref_text = "RAG 是检索增强生成技术 用于提升 LLM 的回答准确性"
hyp_text = "RAG 是一种检索增强生成的方法 能提升回答的准确性"
bleu_score = calc_bleu(ref_text, hyp_text)
rouge_scores = calc_rouge_l(ref_text, hyp_text)
print(f"BLEU: {bleu_score:.4f}")
print(f"ROUGE-L: P={rouge_scores['precision']:.4f} R={rouge_scores['recall']:.4f} F1={rouge_scores['f1']:.4f}")

BERTScore(语义匹配)

BLEU/ROUGE 基于字面匹配,而 BERTScore 衡量语义相似度:

"""
BERTScore - 基于语义嵌入的评估指标
"""
import numpy as np
class SimpleBERTScore:
    """Simplified BERTScore approximated with embedding cosine similarity.

    Each (reference, hypothesis) pair is embedded via the OpenAI
    embeddings API and scored by the cosine similarity of the two
    vectors. All texts are embedded in a single batched API request
    instead of one request per text.
    """

    def __init__(self):
        # Local import so the module still loads when openai is absent.
        from openai import OpenAI
        self.client = OpenAI()

    def score(self, references: list[str], hypotheses: list[str]) -> dict:
        """Score each reference/hypothesis pair.

        Args:
            references: Ground-truth texts.
            hypotheses: Model outputs, paired positionally (extra
                items in the longer list are ignored, as with zip).

        Returns:
            Dict with "scores" (per-pair cosine similarities) and
            "mean" (their average as a plain float; 0.0 when there
            are no pairs — avoids NumPy's mean-of-empty warning).
        """
        pairs = list(zip(references, hypotheses))
        if not pairs:
            return {"mean": 0.0, "scores": []}
        # One batched request: [ref0, hyp0, ref1, hyp1, ...].
        flat_texts = [text for pair in pairs for text in pair]
        embeddings = self._embed_batch(flat_texts)
        scores = []
        for idx in range(len(pairs)):
            ref_emb = embeddings[2 * idx]
            hyp_emb = embeddings[2 * idx + 1]
            similarity = np.dot(ref_emb, hyp_emb) / (
                np.linalg.norm(ref_emb) * np.linalg.norm(hyp_emb)
            )
            scores.append(float(similarity))
        return {
            "mean": float(np.mean(scores)),  # plain float, not np.float64
            "scores": scores,
        }

    def _embed_batch(self, texts: list[str]) -> list[np.ndarray]:
        """Embed several texts with a single API call."""
        response = self.client.embeddings.create(
            model="text-embedding-3-small",
            input=texts,
        )
        return [np.array(item.embedding) for item in response.data]

    def _embed(self, text: str) -> np.ndarray:
        """Embed one text (kept for backward compatibility)."""
        return self._embed_batch([text])[0]
# Example: semantically similar texts score high even with little
# literal word overlap.
bert_scorer = SimpleBERTScore()
bert_result = bert_scorer.score(
    references=["人工智能正在改变世界"],
    hypotheses=["AI 正在深刻影响全球社会"],
)
print(f"语义相似度: {bert_result['mean']:.4f}")

各指标对比

指标 衡量什么 优点 局限
BLEU n-gram精确率 快速、标准化 不考虑语义
ROUGE n-gram召回率 适合摘要评估 不考虑语义
Exact Match 完全匹配 严格、客观 过于严格
BERTScore 语义相似度 捕捉语义 计算成本高
Pass@K 代码正确率 客观 仅适用代码
F1 Score 精确率+召回率 平衡指标 需要标签

综合评估框架

"""
多指标综合评估框架
"""
class MetricSuite:
    """Registry of evaluation metrics with weighted aggregation.

    Metrics are registered as callables taking (prediction, reference)
    and returning a float; `evaluate` averages each metric over the
    dataset and combines them into a weighted overall score.
    """

    def __init__(self):
        # name -> {"func": callable(pred, ref) -> float, "weight": float}
        self.metrics = {}

    def register(self, name: str, func, weight: float = 1.0):
        """Register a metric under `name` with an aggregation weight."""
        self.metrics[name] = {"func": func, "weight": weight}

    def evaluate(self, predictions: list[str], references: list[str]) -> dict:
        """Run every registered metric and print a report.

        Args:
            predictions: Model outputs.
            references: Ground-truth answers, paired positionally.

        Returns:
            Dict mapping each metric name to {"score", "weight"},
            plus a "weighted_average" float entry.
        """
        results = {}
        for name, config in self.metrics.items():
            scores = [
                config["func"](pred, ref)
                for pred, ref in zip(predictions, references)
            ]
            avg = sum(scores) / len(scores) if scores else 0
            results[name] = {
                "score": round(avg, 4),
                "weight": config["weight"],
            }
        # Weighted overall score; guard against division by zero when
        # no metrics are registered (or all weights are zero).
        total_weight = sum(m["weight"] for m in results.values())
        if total_weight > 0:
            weighted = sum(
                r["score"] * r["weight"] for r in results.values()
            ) / total_weight
        else:
            weighted = 0.0
        results["weighted_average"] = round(weighted, 4)
        # Print a human-readable report.
        print("=== 评估报告 ===")
        for name, r in results.items():
            if isinstance(r, dict):
                print(f"  {name}: {r['score']} (权重: {r['weight']})")
            else:
                print(f"  加权平均: {r}")
        return results
# Example: combine ROUGE-L, BLEU and a length-ratio heuristic.
eval_suite = MetricSuite()
eval_suite.register(
    "rouge_l",
    lambda pred, ref: calc_rouge_l(ref, pred)["f1"],
    weight=0.3,
)
eval_suite.register(
    "bleu",
    lambda pred, ref: calc_bleu(ref, pred),
    weight=0.3,
)
eval_suite.register(
    "length_ratio",
    lambda pred, ref: min(len(pred) / max(len(ref), 1), 1.0),
    weight=0.1,
)
preds = ["RAG 是检索增强生成方法", "向量数据库存储嵌入向量"]
refs = ["RAG 是检索增强生成技术", "向量数据库用于存储向量嵌入"]
results = eval_suite.evaluate(preds, refs)

本章小结

下一章:学习 LLM-as-a-Judge,用大模型评估大模型。