数据收集与格式化
微调效果的上限由数据决定——"Garbage In, Garbage Out"。
数据准备流程
graph LR
A[原始数据] --> B[数据收集]
B --> C[数据清洗]
C --> D[格式标准化]
D --> E[质量验证]
E --> F{合格?}
F -->|是| G[数据集划分]
F -->|否| C
G --> H[训练/验证/测试集]
style A fill:#ffcdd2,stroke:#c62828
style H fill:#c8e6c9,stroke:#388e3c,stroke-width:2px
数据格式标准
"""
微调数据格式:三大标准格式
"""
import json
class DataFormats:
    """Catalogue of the standard fine-tuning data formats, plus a JSONL writer.

    Holds illustrative examples of the three mainstream formats (instruction,
    chat/ChatML, completion) and the OpenAI fine-tuning JSONL flavour.
    """

    # Format 1: instruction tuning (the most common) — instruction/input/output triples.
    INSTRUCTION_FORMAT = {
        "说明": "instruction + input + output 三元组",
        "适用": "指令微调、对话模型",
        "示例": [
            {
                "instruction": "将以下文本翻译成英文",
                "input": "今天天气很好",
                "output": "The weather is nice today",
            },
            {
                "instruction": "用一句话总结以下段落",
                "input": "人工智能正在改变各行各业...",
                "output": "AI 正在全面革新传统行业",
            },
        ],
    }

    # Format 2: chat format (ChatML) — multi-turn "messages" lists.
    CHAT_FORMAT = {
        "说明": "多轮对话的 messages 格式",
        "适用": "对话模型微调",
        "示例": {
            "messages": [
                {"role": "system", "content": "你是一个专业的Python助手"},
                {"role": "user", "content": "如何读取JSON文件?"},
                {"role": "assistant", "content": (
                    "使用 json 模块:\n"
                    "```python\n"
                    "import json\n"
                    "with open('data.json') as f:\n"
                    "    data = json.load(f)\n"
                    "```"
                )},
            ]
        },
    }

    # Format 3: completion — plain prompt/completion pairs for simple tasks.
    COMPLETION_FORMAT = {
        "说明": "简单的 prompt + completion 对",
        "适用": "文本补全、简单任务",
        "示例": {
            "prompt": "将情感分类为正面或负面:这个产品很好用 ->",
            "completion": " 正面",
        },
    }

    # OpenAI fine-tuning API variant: one "messages" object per JSONL line.
    OPENAI_FORMAT = {
        "说明": "OpenAI API 微调专用的 JSONL 格式",
        "示例": {
            "messages": [
                {"role": "system", "content": "你是客服助手"},
                {"role": "user", "content": "我要退货"},
                {"role": "assistant", "content": (
                    "好的,请提供您的订单号,"
                    "我来帮您处理退货申请。"
                )},
            ]
        },
    }

    @staticmethod
    def save_jsonl(data: list[dict], filepath: str):
        """Write *data* to *filepath* as JSONL: one UTF-8 JSON object per line."""
        serialized = [json.dumps(record, ensure_ascii=False) for record in data]
        with open(filepath, "w", encoding="utf-8") as sink:
            for line in serialized:
                sink.write(line + "\n")
        print(f"保存 {len(data)} 条到 {filepath}")
# Demo: pretty-print one sample from each of the two main formats.
catalog = DataFormats()
print("=== 指令格式 ===")
first_example = catalog.INSTRUCTION_FORMAT["示例"][0]
print(json.dumps(first_example, ensure_ascii=False, indent=2))
print("\n=== 对话格式 ===")
print(json.dumps(catalog.CHAT_FORMAT["示例"], ensure_ascii=False, indent=2))
数据收集策略
"""
如何收集高质量微调数据
"""
class DataCollectionStrategy:
    """Reference table of data-collection approaches for fine-tuning.

    Each entry describes one sourcing strategy: method, cost, quality rating,
    typical volume, when it fits, and practical tips.
    """

    STRATEGIES = {
        "人工标注": {
            "方法": "专家编写高质量问答对",
            "成本": "高($0.5-5/条)",
            "质量": "⭐⭐⭐⭐⭐",
            "数量": "100-5000 条",
            "适用": "核心业务场景",
            "技巧": [
                "先写标注指南,统一标准",
                "多人标注 + 交叉验证",
                "定期审查和更新指南",
            ],
        },
        "历史数据提炼": {
            "方法": "从客服记录/工单/日志中提取",
            "成本": "低",
            "质量": "⭐⭐⭐",
            "数量": "1000-100000 条",
            "适用": "有业务积累的场景",
            "技巧": [
                "筛选好评/解决的工单",
                "脱敏处理个人信息",
                "清除垃圾数据和噪音",
            ],
        },
        "LLM 辅助生成": {
            "方法": "用 GPT-4 生成训练数据",
            "成本": "中",
            "质量": "⭐⭐⭐⭐",
            "数量": "1000-50000 条",
            "适用": "冷启动、数据不足",
            "技巧": [
                "用 GPT-4 生成后人工审核",
                "设定多样化的 Prompt 模板",
                "避免数据同质化",
            ],
        },
        "开源数据集": {
            "方法": "使用 HuggingFace Datasets 等",
            "成本": "免费",
            "质量": "⭐⭐⭐",
            "数量": "大量",
            "适用": "通用能力增强",
            "技巧": [
                "检查许可证合规性",
                "需要适配自己的格式",
                "可与自有数据混合使用",
            ],
        },
    }
# Demo: quick comparison of the four collection strategies.
collector = DataCollectionStrategy()
print("=== 数据收集策略 ===")
for strategy_name, details in collector.STRATEGIES.items():
    print(f"\n{strategy_name}:")
    print(f"  质量: {details['质量']} | 成本: {details['成本']}")
    print(f"  适用: {details['适用']}")
数据清洗与质量控制
"""
数据清洗 Pipeline
"""
import re
from dataclasses import dataclass, field
@dataclass
class DataCleaner:
    """Rule-based cleaning pipeline for instruction-tuning examples.

    Drops records that are missing required fields, too short or too long,
    exact duplicates, identical to their own instruction, or that contain
    obvious PII (11-digit phone numbers, 18-character ID numbers), and
    normalizes the surviving records. Counters accumulate across calls.
    """

    min_length: int = 10    # minimum accepted output length, in characters
    max_length: int = 4096  # maximum accepted output length, in characters
    removed: int = 0        # running count of rejected examples
    kept: int = 0           # running count of accepted examples
    reasons: dict = field(default_factory=dict)  # rejection reason -> count

    # Compiled once at class creation instead of per validation call.
    # 11 consecutive digits: mainland-China mobile number.
    _PHONE_RE = re.compile(r'\b\d{11}\b')
    # 17 digits + check digit (0-9 or X/x): covers IDs ending in X,
    # which a plain \d{18} pattern would miss.
    _ID_RE = re.compile(r'\b\d{17}[\dXx]\b')

    def clean(self, examples: list[dict]) -> list[dict]:
        """Validate, normalize, and exact-deduplicate *examples*.

        Returns the list of surviving normalized records; updates the
        kept/removed counters and the per-reason rejection tally.
        """
        cleaned = []
        seen: set[tuple] = set()  # normalized triples already emitted this call
        for ex in examples:
            ok, reason = self._validate(ex)
            if not ok:
                self.removed += 1
                self.reasons[reason] = self.reasons.get(reason, 0) + 1
                continue
            norm = self._normalize(ex)
            key = (norm["instruction"], norm["input"], norm["output"])
            if key in seen:  # exact duplicate after normalization
                self.removed += 1
                self.reasons["重复样本"] = self.reasons.get("重复样本", 0) + 1
                continue
            seen.add(key)
            cleaned.append(norm)
            self.kept += 1
        return cleaned

    def _validate(self, example: dict) -> tuple[bool, str]:
        """Return (ok, reason); reason is "" when the example passes."""
        # Required fields.
        if "instruction" not in example:
            return False, "缺少 instruction"
        if "output" not in example:
            return False, "缺少 output"
        output = example["output"]
        # Length bounds.
        if len(output) < self.min_length:
            return False, "output 太短"
        if len(output) > self.max_length:
            return False, "output 太长"
        # Quality: output that merely parrots the instruction is useless.
        if output.strip() == example.get("instruction", "").strip():
            return False, "output 与 instruction 重复"
        # PII screening on the output text.
        if self._PHONE_RE.search(output):
            return False, "包含手机号"
        if self._ID_RE.search(output):
            return False, "包含身份证号"
        return True, ""

    def _normalize(self, example: dict) -> dict:
        """Trim whitespace and guarantee the instruction/input/output schema."""
        return {
            "instruction": example["instruction"].strip(),
            "input": example.get("input", "").strip(),
            "output": example["output"].strip(),
        }

    def report(self) -> dict:
        """Summarize cleaning results; safe to call before any data is processed."""
        total = self.kept + self.removed
        # Guard: avoid ZeroDivisionError on an unused cleaner.
        rate = f"{self.kept / total * 100:.1f}%" if total else "0.0%"
        return {
            "总数": total,
            "保留": self.kept,
            "删除": self.removed,
            "保留率": rate,
            "删除原因": self.reasons,
        }
# Demo: run the cleaner over a tiny hand-made dataset.
pipeline = DataCleaner(min_length=10, max_length=2000)
samples = [
    {"instruction": "翻译", "output": "Hello World, this is a test"},
    {"instruction": "总结", "output": "短"},  # rejected: output too short
    {"instruction": "分析", "output": "分析"},  # rejected: parrots the instruction
    {"instruction": "帮助", "input": "问题", "output": "这是一个有效的回答内容"},
]
kept_samples = pipeline.clean(samples)
print(f"清洗结果: {pipeline.report()}")
数据集划分
"""
数据集划分策略
"""
import random
def split_dataset(
    data: list[dict],
    train_ratio: float = 0.8,
    val_ratio: float = 0.1,
    test_ratio: float = 0.1,
    seed: int = 42,
) -> dict:
    """Shuffle *data* reproducibly and split into train/validation/test.

    Args:
        data: list of examples; not mutated (a shuffled copy is used).
        train_ratio / val_ratio / test_ratio: split proportions; must sum to 1.0.
        seed: RNG seed so the same call always yields the same split.

    Returns:
        {"train": [...], "validation": [...], "test": [...]}.

    Raises:
        ValueError: if the three ratios do not sum to 1.0.
        (An ``assert`` would vanish under ``python -O``; an exception does not.)
    """
    if abs(train_ratio + val_ratio + test_ratio - 1.0) >= 1e-6:
        raise ValueError("train_ratio + val_ratio + test_ratio must equal 1.0")
    # Local RNG: reproducible without clobbering the module-global random state.
    rng = random.Random(seed)
    shuffled = data.copy()
    rng.shuffle(shuffled)
    n = len(shuffled)
    train_end = int(n * train_ratio)
    val_end = train_end + int(n * val_ratio)
    result = {
        "train": shuffled[:train_end],
        "validation": shuffled[train_end:val_end],
        "test": shuffled[val_end:],
    }
    print("数据集划分:")
    for name, subset in result.items():
        # Guard the percentage against empty input (n == 0).
        pct = len(subset) / n * 100 if n else 0.0
        print(f"  {name}: {len(subset)} 条 ({pct:.0f}%)")
    return result
# Rules of thumb for sizing train/val/test at different dataset scales.
SPLIT_GUIDELINES = {
    "数据量 < 500": {
        "train": "80%",
        "val": "10%",
        "test": "10%",
        "注意": "数据少时可用交叉验证",
    },
    "数据量 500-5000": {
        "train": "80%",
        "val": "10%",
        "test": "10%",
        "注意": "标准划分",
    },
    "数据量 > 5000": {
        "train": "90%",
        "val": "5%",
        "test": "5%",
        "注意": "验证集和测试集足够大",
    },
}

print("=== 划分建议 ===")
for size_bucket, ratios in SPLIT_GUIDELINES.items():
    print(f"\n{size_bucket}: train {ratios['train']} | val {ratios['val']}")
本章小结
| 环节 | 关键要点 | 工具推荐 |
|---|---|---|
| 数据格式 | ChatML 对话格式最通用 | JSONL 文件 |
| 数据收集 | 人工标注 + LLM 辅助 | GPT-4 生成 + 人工审核 |
| 数据清洗 | 去重、脱敏、长度过滤 | 自定义 Pipeline |
| 数据划分 | 80/10/10 最常用 | sklearn/random |
下一章:训练环境搭建与超参数配置。