Model Export and Inference Optimization

Finishing fine-tuning is only the beginning: how do you make the model run fast, keep its footprint small, and deploy it reliably?

The Inference Optimization Landscape

graph LR
    FT[Fine-tuning done] --> MERGE[Merge LoRA]
    MERGE --> QUANT[Quantization]
    MERGE --> PRUNE[Pruning]
    QUANT --> GGUF[GGUF format]
    QUANT --> GPTQ[GPTQ format]
    QUANT --> AWQ[AWQ format]
    GGUF --> LLAMA_CPP[llama.cpp]
    GPTQ --> VLLM[vLLM]
    AWQ --> TGI[TGI]
    VLLM --> DEPLOY[Production deployment]
    TGI --> DEPLOY
    LLAMA_CPP --> EDGE[Edge devices]
    style FT fill:#fff3e0,stroke:#f57c00,stroke-width:2px
    style DEPLOY fill:#c8e6c9,stroke:#388e3c,stroke-width:2px
    style EDGE fill:#e3f2fd,stroke:#1565c0,stroke-width:2px

LoRA Merging and Export

"""
LoRA 合并与导出
"""
class LoRAMerger:
"""LoRA 适配器合并"""
STEPS = """
# 1. 加载基座模型和 LoRA
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
base_model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-7B-Instruct",
torch_dtype=torch.float16,
device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(
"Qwen/Qwen2.5-7B-Instruct"
)
# 2. 加载 LoRA 权重
model = PeftModel.from_pretrained(
base_model,
"./my-lora-adapter",
)
# 3. 合并 LoRA 回基座
merged_model = model.merge_and_unload()
# 4. 保存合并后的模型
merged_model.save_pretrained("./merged-model")
tokenizer.save_pretrained("./merged-model")
print("✅ LoRA 合并完成,模型保存在 ./merged-model")
"""
NOTES = [
"合并后模型大小 = 基座大小(LoRA 被吸收)",
"合并前后推理结果相同",
"合并后不再需要 PEFT 库加载",
"建议先验证再合并",
]
merger = LoRAMerger()
print("=== LoRA 合并步骤 ===")
print(merger.STEPS)
print("\n注意事项:")
for note in merger.NOTES:
print(f"  - {note}")

Model Quantization

"""
模型量化:减少模型大小和显存占用
"""
class ModelQuantization:
"""模型量化方法"""
METHODS = {
"GPTQ": {
"类型": "训练后量化 (PTQ)",
"精度": "4-bit / 8-bit",
"速度": "快",
"质量": "⭐⭐⭐⭐",
"适用": "GPU 推理",
"工具": "auto-gptq",
},
"AWQ": {
"类型": "激活感知量化",
"精度": "4-bit",
"速度": "最快",
"质量": "⭐⭐⭐⭐⭐",
"适用": "GPU 推理(推荐)",
"工具": "autoawq",
},
"GGUF": {
"类型": "GGML 格式量化",
"精度": "2-8 bit",
"速度": "中等",
"质量": "⭐⭐⭐ ~ ⭐⭐⭐⭐",
"适用": "CPU / Apple Silicon",
"工具": "llama.cpp",
},
"BitsAndBytes": {
"类型": "动态量化",
"精度": "4-bit / 8-bit",
"速度": "中等",
"质量": "⭐⭐⭐⭐",
"适用": "训练 + 推理",
"工具": "bitsandbytes",
},
}
GPTQ_CODE = """
# GPTQ 量化示例
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
model_id = "./merged-model"
quantization_config = GPTQConfig(
bits=4,
dataset="c4",
tokenizer=AutoTokenizer.from_pretrained(model_id),
)
model = AutoModelForCausalLM.from_pretrained(
model_id,
quantization_config=quantization_config,
device_map="auto",
)
model.save_pretrained("./model-gptq-4bit")
print("✅ GPTQ 4-bit 量化完成")
"""
AWQ_CODE = """
# AWQ 量化示例
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer
model_path = "./merged-model"
quant_path = "./model-awq-4bit"
model = AutoAWQForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
quant_config = {
"zero_point": True,
"q_group_size": 128,
"w_bit": 4,
"version": "GEMM",
}
model.quantize(tokenizer, quant_config=quant_config)
model.save_quantized(quant_path)
tokenizer.save_pretrained(quant_path)
print("✅ AWQ 4-bit 量化完成")
"""
GGUF_CODE = """
# GGUF 格式转换(使用 llama.cpp)
# 安装
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp && make
# 转换
python convert_hf_to_gguf.py ./merged-model --outfile model.gguf
# 量化
./llama-quantize model.gguf model-Q4_K_M.gguf Q4_K_M
# 常用量化级别
# Q2_K   - 2-bit, 最小,质量较低
# Q4_K_M - 4-bit, 推荐平衡
# Q5_K_M - 5-bit, 推荐高质量
# Q8_0   - 8-bit, 质量接近原始
"""
quant = ModelQuantization()
print("=== 量化方法对比 ===")
for name, info in quant.METHODS.items():
print(f"\n{name}: {info['类型']}")
print(f"  精度: {info['精度']}, 速度: {info['速度']}, 质量: {info['质量']}")
print(f"  适用: {info['适用']}")

Inference Engines

"""
推理引擎选择
"""
class InferenceEngine:
"""推理引擎"""
ENGINES = {
"vLLM": {
"特点": "PagedAttention, 连续批处理",
"吞吐": "最高",
"适用": "GPU 推理服务",
"模型格式": "HF / GPTQ / AWQ",
"部署示例": """
# 启动 vLLM 服务
python -m vllm.entrypoints.openai.api_server \\
--model ./model-awq-4bit \\
--quantization awq \\
--max-model-len 4096 \\
--gpu-memory-utilization 0.9 \\
--port 8000
# API 兼容 OpenAI 格式
curl http://localhost:8000/v1/chat/completions \\
-H "Content-Type: application/json" \\
-d '{"model": "model", "messages": [{"role": "user", "content": "你好"}]}'
""",
},
"TGI (Text Generation Inference)": {
"特点": "HuggingFace 官方, Docker 一键部署",
"吞吐": "高",
"适用": "容器化部署",
"模型格式": "HF / GPTQ / AWQ",
"部署示例": """
# Docker 启动
docker run --gpus all \\
-p 8080:80 \\
-v ./model:/model \\
ghcr.io/huggingface/text-generation-inference \\
--model-id /model \\
--quantize awq \\
--max-input-tokens 2048 \\
--max-total-tokens 4096
""",
},
"llama.cpp": {
"特点": "纯 C++ 实现, CPU 友好",
"吞吐": "中等",
"适用": "CPU / 边缘设备 / Mac",
"模型格式": "GGUF",
"部署示例": """
# 启动服务
./llama-server \\
-m model-Q4_K_M.gguf \\
-c 4096 \\
-ngl 35 \\
--host 0.0.0.0 \\
--port 8080
""",
},
"Ollama": {
"特点": "一键安装运行, 最简单",
"吞吐": "中等",
"适用": "本地开发测试",
"模型格式": "GGUF",
"部署示例": """
# 创建 Modelfile
FROM ./model-Q4_K_M.gguf
PARAMETER temperature 0.7
SYSTEM 你是一个专业助手
# 创建和运行
ollama create mymodel -f Modelfile
ollama run mymodel
""",
},
}
engine = InferenceEngine()
print("=== 推理引擎对比 ===")
for name, info in engine.ENGINES.items():
print(f"\n{name}:")
print(f"  特点: {info['特点']}")
print(f"  吞吐: {info['吞吐']}")
print(f"  适用: {info['适用']}")

Performance Comparison

Setup                            Memory    Throughput (tok/s)    Latency (ms)
7B FP16 + vLLM                   14GB      120                   80
7B AWQ-4bit + vLLM               5GB       150                   60
7B GGUF-Q4 + llama.cpp (CPU)     8GB       30                    300
7B GPTQ-4bit + TGI               5GB       130                   70
70B AWQ-4bit + vLLM              40GB      30                    300
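Throughput and latency depend heavily on hardware, batch size, and sequence length, so the numbers above are only indicative. A rough way to re-measure on your own setup, sketched against an OpenAI-compatible endpoint: a single request with no warmup, concurrency, or averaging, so treat it as a smoke test rather than a benchmark.

# Rough single-request latency / throughput measurement (sketch)
import time
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")

start = time.perf_counter()
resp = client.chat.completions.create(
    model="model",
    messages=[{"role": "user", "content": "Explain quantization in one paragraph."}],
    max_tokens=256,
)
elapsed = time.perf_counter() - start

generated = resp.usage.completion_tokens  # token count reported by the server
print(f"Total latency: {elapsed * 1000:.0f} ms, "
      f"throughput: {generated / elapsed:.1f} tok/s")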

Next chapter: versioning and monitoring, the operations side of running the model in production.