A/B 测试与实验平台
High Contrast
Dark Mode
Light Mode
Sepia
Forest
1 min read · 224 words

A/B 测试与实验平台

LLM 应用的优化不能靠直觉——Prompt 的微小改动、模型切换、参数调整都需要数据驱动验证。

实验架构

graph TB
    A[用户请求] --> B[实验路由]
    B --> C[变体 A: GPT-4o]
    B --> D[变体 B: Claude 3.5]
    B --> E[变体 C: 本地模型]
    C --> F[结果收集]
    D --> F
    E --> F
    F --> G[指标计算]
    G --> H{统计显著?}
    H -->|是| I[自动切量/发布]
    H -->|否| J[继续实验]
    style A fill:#e3f2fd,stroke:#1976d2,stroke-width:3px
    style I fill:#e8f5e9,stroke:#388e3c,stroke-width:2px

核心指标

| 指标类别 | 具体指标 | 说明 |
| --- | --- | --- |
| 质量 | 人工评分、LLM-as-Judge | 答案相关性与准确度 |
| 延迟 | P50/P95/P99 | 端到端响应时间 |
| 成本 | Token 消耗、API 费用 | 单请求成本 |
| 用户 | 点赞率、重试率、留存 | 实际用户满意度 |

实验平台实现

"""
LLM A/B 测试框架
"""
import hashlib
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Any
class VariantStatus(Enum):
    """Lifecycle states of an experiment variant."""

    RUNNING = "running"  # actively receiving traffic
    PAUSED = "paused"    # temporarily excluded from assignment
    WINNER = "winner"    # declared the winning variant
    LOSER = "loser"      # declared a losing variant
@dataclass
class Variant:
    """One experiment arm: a named configuration plus its traffic share."""

    name: str
    config: dict  # model / prompt / parameter settings for this arm
    traffic_pct: float = 50.0  # share of traffic, in percent
    status: VariantStatus = VariantStatus.RUNNING  # current lifecycle state
@dataclass
class ExperimentResult:
"""单次实验结果"""
variant_name: str
latency_ms: float
token_count: int
quality_score: float | None = None  # 0-1
user_feedback: int | None = None    # 1=thumbs up, -1=down
@dataclass
class VariantStats:
"""变体聚合统计"""
count: int = 0
total_latency: float = 0.0
total_tokens: int = 0
quality_scores: list[float] = field(default_factory=list)
positive_feedback: int = 0
negative_feedback: int = 0
@property
def avg_latency(self) -> float:
return self.total_latency / self.count if self.count else 0
@property
def avg_tokens(self) -> float:
return self.total_tokens / self.count if self.count else 0
@property
def avg_quality(self) -> float | None:
if not self.quality_scores:
return None
return sum(self.quality_scores) / len(self.quality_scores)
@property
def satisfaction_rate(self) -> float | None:
total = self.positive_feedback + self.negative_feedback
if total == 0:
return None
return self.positive_feedback / total
class Experiment:
    """One A/B experiment: a set of variants plus per-variant statistics."""

    def __init__(self, name: str, variants: list[Variant]):
        """
        Args:
            name: Experiment identifier. Also salts the assignment hash so
                different experiments bucket the same user independently.
            variants: The variants under test.
        """
        self.name = name
        self.variants = {v.name: v for v in variants}
        self.stats: dict[str, VariantStats] = {
            v.name: VariantStats() for v in variants
        }
        self.created_at = time.time()

    def assign_variant(self, user_id: str) -> Variant:
        """Deterministically assign a user to a variant.

        Hash-based bucketing guarantees the same user always sees the same
        variant for the lifetime of the experiment (consistent split).

        Raises:
            RuntimeError: If no variant is currently RUNNING.
        """
        hash_val = int(
            hashlib.sha256(
                f"{self.name}:{user_id}".encode()
            ).hexdigest(), 16
        )
        bucket = hash_val % 100  # uniform bucket in 0-99
        cumulative = 0.0
        for variant in self.variants.values():
            if variant.status != VariantStatus.RUNNING:
                continue
            cumulative += variant.traffic_pct
            if bucket < cumulative:
                return variant
        # Traffic percentages may not cover all 100 buckets (e.g. after a
        # variant is paused): fall back to the first running variant.
        for v in self.variants.values():
            if v.status == VariantStatus.RUNNING:
                return v
        raise RuntimeError("No running variant")

    def record_result(self, result: ExperimentResult):
        """Fold a single request's outcome into its variant's aggregates."""
        stats = self.stats[result.variant_name]
        stats.count += 1
        stats.total_latency += result.latency_ms
        stats.total_tokens += result.token_count
        if result.quality_score is not None:
            stats.quality_scores.append(result.quality_score)
        if result.user_feedback == 1:
            stats.positive_feedback += 1
        elif result.user_feedback == -1:
            stats.negative_feedback += 1

    def get_summary(self) -> dict[str, dict]:
        """Return a per-variant summary of all recorded metrics.

        Metrics with no data yet are reported as None.
        """
        summary = {}
        for name, stats in self.stats.items():
            avg_quality = stats.avg_quality
            summary[name] = {
                "count": stats.count,
                "avg_latency_ms": round(stats.avg_latency, 1),
                "avg_tokens": round(stats.avg_tokens, 1),
                # BUG FIX: compare against None explicitly — a legitimate
                # average quality of 0.0 is falsy and was reported as None
                # by the original truthiness check.
                "avg_quality": (
                    round(avg_quality, 3) if avg_quality is not None else None
                ),
                "satisfaction": (
                    round(stats.satisfaction_rate, 3)
                    if stats.satisfaction_rate is not None
                    else None
                ),
                "status": self.variants[name].status.value,
            }
        return summary

LLM-as-Judge 评估

"""
使用 LLM 自动评估实验质量
"""
from dataclasses import dataclass
@dataclass
class JudgeResult:
    """Outcome of one LLM-as-judge evaluation."""

    score: float                        # overall score in [0, 1]
    reasoning: str                      # judge's free-text justification
    criteria_scores: dict[str, float]   # per-criterion scores
# Prompt template for the judge model: scores an answer on four criteria
# (accuracy, completeness, clarity, usefulness, each 0-1) and requests
# strict-JSON output so the reply is machine-parseable. The template text
# is Chinese to match the product's user language; the doubled braces
# escape the JSON example for str.format().
JUDGE_PROMPT = """请评估以下 AI 助手的回答质量。
用户问题:{question}
AI 回答:{answer}
评分标准(每项 0-1 分):
1. 准确性:信息是否正确
2. 完整性:是否充分回答了问题
3. 清晰度:表述是否清晰易懂
4. 实用性:是否有实际帮助
请按 JSON 格式输出:
{{"accuracy": 0.0, "completeness": 0.0, "clarity": 0.0, "usefulness": 0.0, "reasoning": "..."}}
"""
class LLMJudge:
"""LLM 自动评估"""
def __init__(self, judge_model: str = "gpt-4o"):
self.judge_model = judge_model
def evaluate(self, question: str, answer: str) -> JudgeResult:
"""评估一条回答"""
prompt = JUDGE_PROMPT.format(question=question, answer=answer)
# 调用 judge 模型
response = self._call_model(prompt)
scores = self._parse_scores(response)
overall = sum(scores.values()) / len(scores) if scores else 0
return JudgeResult(
score=overall,
reasoning=scores.pop("reasoning", ""),
criteria_scores=scores,
)
def _call_model(self, prompt: str) -> str:
"""调用评审模型(简化示意)"""
return '{"accuracy": 0.8, "completeness": 0.9, "clarity": 0.85, "usefulness": 0.9, "reasoning": "回答准确且完整"}'
def _parse_scores(self, response: str) -> dict:
"""解析评分"""
import json
try:
return json.loads(response)
except json.JSONDecodeError:
return {}

实验决策流程

graph LR
    A[启动实验] --> B[收集数据<br/>最少 1000 样本]
    B --> C[统计检验<br/>p < 0.05]
    C --> D{显著差异?}
    D -->|是| E[宣布胜者]
    D -->|否| F{样本充足?}
    F -->|否| B
    F -->|是| G[无显著差异<br/>选低成本方案]
    E --> H[全量切换]
    G --> H
    style A fill:#e3f2fd,stroke:#1976d2,stroke-width:3px
    style H fill:#e8f5e9,stroke:#388e3c,stroke-width:2px

本章小结

| 主题 | 要点 |
| --- | --- |
| 一致性分流 | 同一用户始终看到相同变体 |
| 多维指标 | 质量 + 延迟 + 成本 + 用户满意度 |
| LLM-as-Judge | 用 GPT-4o 等做自动评分 |
| 统计显著性 | 至少 1000 样本,p < 0.05 |

下一章:日志与审计