客服模型微调实战
从零搭建一个智能客服系统——数据准备、训练、评估、部署的完整流程。
项目架构
graph LR
RAW[历史客服记录] --> CLEAN[数据清洗]
CLEAN --> FORMAT[格式化]
FORMAT --> SPLIT[数据集划分]
SPLIT --> TRAIN[QLoRA 微调]
TRAIN --> EVAL[评估]
EVAL --> QUANT[量化 AWQ]
QUANT --> DEPLOY[vLLM 部署]
DEPLOY --> MONITOR[线上监控]
style RAW fill:#fff3e0,stroke:#f57c00,stroke-width:2px
style DEPLOY fill:#c8e6c9,stroke:#388e3c,stroke-width:2px
第一步:数据准备
"""
客服数据准备全流程
"""
import json
import random
import re
from pathlib import Path
class CustomerServiceDataPipeline:
"""客服数据处理管道"""
SYSTEM_PROMPT = (
"你是一个专业的客户服务助手。"
"请用友善、专业的语气回答用户问题。"
"如果不确定答案,请诚实告知并建议联系人工客服。"
)
def __init__(self):
self.data = []
def load_raw_data(self, filepath: str) -> list[dict]:
"""加载原始客服对话记录"""
with open(filepath, "r", encoding="utf-8") as f:
raw = json.load(f)
print(f"加载 {len(raw)} 条原始记录")
return raw
def clean_conversation(self, conv: dict) -> dict | None:
"""清洗单条对话"""
user_msg = conv.get("user_message", "").strip()
agent_msg = conv.get("agent_response", "").strip()
# 过滤无效数据
if len(user_msg) < 5 or len(agent_msg) < 10:
return None
if len(agent_msg) > 2000:
return None
# 去除 PII
agent_msg = re.sub(
r"\b\d{11}\b", "[电话号码]", agent_msg
)
agent_msg = re.sub(
r"[\w.+-]+@[\w-]+\.[\w.]+", "[邮箱]", agent_msg
)
# 格式化为对话格式
return {
"messages": [
{"role": "system", "content": self.SYSTEM_PROMPT},
{"role": "user", "content": user_msg},
{"role": "assistant", "content": agent_msg},
]
}
def process_multi_turn(self, conv: dict) -> dict | None:
"""处理多轮对话"""
turns = conv.get("turns", [])
if len(turns) < 2:
return None
messages = [
{"role": "system", "content": self.SYSTEM_PROMPT}
]
for turn in turns:
role = "user" if turn["speaker"] == "customer" else "assistant"
messages.append({
"role": role,
"content": turn["text"].strip(),
})
# 确保以 assistant 结尾
if messages[-1]["role"] != "assistant":
return None
return {"messages": messages}
def build_dataset(
self,
raw_data: list[dict],
output_path: str,
) -> dict:
"""构建完整数据集"""
processed = []
skipped = 0
for conv in raw_data:
result = self.clean_conversation(conv)
if result:
processed.append(result)
else:
skipped += 1
# 划分数据集
import random
random.seed(42)
random.shuffle(processed)
n = len(processed)
train_end = int(n * 0.85)
val_end = int(n * 0.95)
splits = {
"train": processed[:train_end],
"validation": processed[train_end:val_end],
"test": processed[val_end:],
}
# 保存
output = Path(output_path)
output.mkdir(exist_ok=True)
for split_name, data in splits.items():
filepath = output / f"{split_name}.jsonl"
with open(filepath, "w", encoding="utf-8") as f:
for item in data:
f.write(json.dumps(item, ensure_ascii=False) + "\n")
stats = {
"总样本": n,
"训练集": len(splits["train"]),
"验证集": len(splits["validation"]),
"测试集": len(splits["test"]),
"过滤数": skipped,
}
print("数据集统计:", stats)
return stats
# Demo: instantiate the pipeline and describe its processing stages.
pipeline = CustomerServiceDataPipeline()
print("=== 客服数据管道 ===")
print(f"System Prompt: {pipeline.SYSTEM_PROMPT[:50]}...")
print("\n数据处理流程:")
for step in (
    " 1. load_raw_data() - 加载原始数据",
    " 2. clean_conversation() - 清洗和过滤",
    " 3. build_dataset() - 划分和保存",
):
    print(step)
第二步:模型训练
"""
客服模型 QLoRA 微调
"""
TRAINING_SCRIPT = """
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
TrainingArguments,
)
from peft import LoraConfig, get_peft_model
from trl import SFTTrainer
# ===== 配置 =====
MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct"
DATA_PATH = "./cs-dataset"
OUTPUT_DIR = "./cs-model-v1"
# ===== 量化配置 =====
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_use_double_quant=True,
)
# ===== 加载模型 =====
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
quantization_config=bnb_config,
device_map="auto",
trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
tokenizer.pad_token = tokenizer.eos_token
# ===== LoRA 配置 =====
lora_config = LoraConfig(
r=64, # 客服场景用较大 rank
lora_alpha=128,
target_modules=[
"q_proj", "k_proj", "v_proj", "o_proj",
"gate_proj", "up_proj", "down_proj",
],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
# ===== 加载数据 =====
dataset = load_dataset("json", data_files={
"train": f"{DATA_PATH}/train.jsonl",
"validation": f"{DATA_PATH}/validation.jsonl",
})
# ===== 训练配置 =====
training_args = TrainingArguments(
output_dir=OUTPUT_DIR,
num_train_epochs=3,
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
learning_rate=2e-4,
lr_scheduler_type="cosine",
warmup_ratio=0.05,
logging_steps=10,
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=3,
bf16=True,
report_to="wandb",
run_name="cs-model-v1",
)
# ===== 开始训练 =====
trainer = SFTTrainer(
model=model,
args=training_args,
train_dataset=dataset["train"],
eval_dataset=dataset["validation"],
tokenizer=tokenizer,
max_seq_length=2048,
)
trainer.train()
trainer.save_model(f"{OUTPUT_DIR}/final")
print("✅ 训练完成!")
"""
print("=== 训练脚本 ===")
print("模型: Qwen2.5-7B-Instruct + QLoRA")
print("LoRA rank: 64, alpha: 128")
print("训练: 3 epochs, lr=2e-4, cosine schedule")
第三步:评估验收
"""
客服模型评估
"""
class CustomerServiceEval:
"""客服模型评估"""
EVAL_DIMENSIONS = {
"回答准确性": {
"说明": "回答是否正确,信息是否准确",
"权重": 0.3,
"目标": "> 90%",
},
"服务态度": {
"说明": "语气是否友善、专业",
"权重": 0.2,
"目标": "> 95%",
},
"完整性": {
"说明": "是否完整回答了用户问题",
"权重": 0.25,
"目标": "> 85%",
},
"安全性": {
"说明": "是否泄露敏感信息或给出危险建议",
"权重": 0.25,
"目标": "100%",
},
}
TEST_CASES = [
{
"category": "退款咨询",
"input": "我买的东西质量有问题,想申请退款",
"expected_keywords": ["退款", "售后", "工单"],
},
{
"category": "物流查询",
"input": "我的订单发货了吗?快递到哪了?",
"expected_keywords": ["物流", "快递", "查询"],
},
{
"category": "账户问题",
"input": "我忘记密码了,怎么找回?",
"expected_keywords": ["密码", "重置", "验证"],
},
{
"category": "越界问题",
"input": "帮我写一段代码",
"expected_keywords": ["抱歉", "范围", "客服"],
},
]
# Demo: print the weighted rubric and the number of smoke-test prompts.
cs_eval = CustomerServiceEval()
print("=== 客服评估维度 ===")
for dim, spec in cs_eval.EVAL_DIMENSIONS.items():
    weight, target = spec["权重"], spec["目标"]
    print(f" {dim} (权重 {weight}): 目标 {target}")
print(f"\n测试用例: {len(cs_eval.TEST_CASES)} 条")
第四步:部署上线
"""
部署配置
"""
DEPLOYMENT = {
"量化": "AWQ 4-bit",
"推理引擎": "vLLM",
"硬件": "1x A10G (24GB)",
"并发": "~30 QPS",
"Docker 配置": """
FROM vllm/vllm-openai:latest
COPY ./cs-model-awq /model
ENV MODEL_NAME=/model
ENV MAX_MODEL_LEN=2048
ENV GPU_MEMORY_UTILIZATION=0.9
CMD ["python", "-m", "vllm.entrypoints.openai.api_server", \\
"--model", "/model", \\
"--quantization", "awq", \\
"--max-model-len", "2048", \\
"--port", "8000"]
""",
"Kubernetes 配置": """
apiVersion: apps/v1
kind: Deployment
metadata:
name: cs-model
spec:
replicas: 2
template:
spec:
containers:
- name: model
image: cs-model:v1.0.0
resources:
limits:
nvidia.com/gpu: 1
ports:
- containerPort: 8000
livenessProbe:
httpGet:
path: /health
port: 8000
""",
}
print("=== 部署配置 ===")
for k, v in DEPLOYMENT.items():
if k not in ("Docker 配置", "Kubernetes 配置"):
print(f" {k}: {v}")
项目总结
| 阶段 | 耗时 | 成本 | 产出 |
|---|---|---|---|
| 数据准备 | 1-2 周 | 人工 | 5000+ 标注样本 |
| 模型训练 | 4-6 小时 | GPU $10-30 | QLoRA 适配器 |
| 评估测试 | 1-2 天 | 人工 + GPT-4 | 评估报告 |
| 量化部署 | 半天 | 无 | AWQ 模型 |
| 线上运行 | 持续 | $720/月 | 30 QPS 服务 |
下一章:代码与领域模型微调——更多实战场景。