视觉语言模型实战
High Contrast
Dark Mode
Light Mode
Sepia
Forest
1 min read · 156 words

视觉语言模型实战

视觉语言模型(VLM)是当前多模态 AI 的核心能力。学会用好它,80% 的图像理解任务就解决了。

VLM 工作原理

```mermaid
graph LR
    A[图像] --> B[视觉编码器<br/>ViT/CLIP]
    B --> C[视觉 Token]
    D[文本提示] --> E[文本 Token]
    C --> F[融合层]
    E --> F
    F --> G[语言模型<br/>Transformer]
    G --> H[文本输出]
    style F fill:#e3f2fd,stroke:#1976d2,stroke-width:2px
```

GPT-4V / GPT-4o 视觉

"""
GPT-4o 视觉能力实战
使用 OpenAI API 进行图像理解
"""
import base64
import mimetypes
from pathlib import Path
class VisionAnalyzer:
    """Vision-language model analyzer.

    Builds OpenAI chat-completion request payloads for image understanding
    (it assembles the request structure only; no API call is made here).
    """

    def __init__(self, model: str = "gpt-4o"):
        # Model name placed into the request payload (e.g. "gpt-4o").
        self.model = model

    def encode_image(self, image_path: str) -> str:
        """Read a local image file and return its base64-encoded contents."""
        with open(image_path, "rb") as f:
            return base64.b64encode(f.read()).decode("utf-8")

    def build_message(
        self,
        prompt: str,
        image_source: str,
        detail: str = "auto",
    ) -> list[dict]:
        """Build a multimodal user message for the chat completions API.

        Args:
            prompt: Text instruction to accompany the image.
            image_source: HTTP(S) URL or a local file path.
            detail: Vision detail level:
                - "low": 512x512, ~85 tokens, fast
                - "high": up to 2048px, ~1105 tokens, precise
                - "auto": let the API choose

        Returns:
            A one-element ``messages`` list containing a text part and an
            image part.
        """
        # Distinguish remote URL from local file. Matching full URL schemes
        # (not just the "http" prefix) avoids misclassifying a local path
        # that happens to start with "http".
        if image_source.startswith(("http://", "https://")):
            image_content = {
                "type": "image_url",
                "image_url": {
                    "url": image_source,
                    "detail": detail,
                },
            }
        else:
            # Local file: embed as a data URL. Guess the MIME type from the
            # file extension instead of hard-coding image/jpeg (previously
            # PNG/GIF/WebP uploads were mislabeled); fall back to JPEG when
            # the extension is unknown.
            mime, _ = mimetypes.guess_type(image_source)
            b64 = self.encode_image(image_source)
            image_content = {
                "type": "image_url",
                "image_url": {
                    "url": f"data:{mime or 'image/jpeg'};base64,{b64}",
                    "detail": detail,
                },
            }
        return [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    image_content,
                ],
            }
        ]

    def analyze_image(
        self,
        prompt: str,
        image_source: str,
        detail: str = "auto",
    ) -> dict:
        """Assemble the request payload for analyzing a single image.

        The actual call would be::

            response = client.chat.completions.create(
                model=self.model,
                messages=messages,
                max_tokens=1024,
            )

        ``detail`` is now forwarded to ``build_message`` (previously it was
        silently fixed at "auto"); the default keeps old behavior.
        """
        messages = self.build_message(prompt, image_source, detail=detail)
        return {
            "model": self.model,
            "messages": messages,
            "max_tokens": 1024,
        }

    def batch_analyze(
        self,
        images: list[str],
        prompt: str,
        detail: str = "auto",
    ) -> list[dict]:
        """Build request payloads for several images with the same prompt."""
        return [self.analyze_image(prompt, img, detail=detail) for img in images]
# Reusable prompt templates for common VLM tasks, keyed by task name.
VLM_PROMPTS = {
    "图像描述": "请详细描述这张图片的内容,包括主体、背景、颜色、风格。",
    "OCR 提取": "请提取图片中的所有文字内容,保持原始格式。",
    "图表分析": "\n".join([
        "请分析这个图表:",
        "1. 图表类型",
        "2. 数据趋势",
        "3. 关键发现",
        "请用结构化格式输出。",
    ]),
    "商品分析": "\n".join([
        "请分析这个商品图片:",
        "1. 商品类别",
        "2. 品牌(如能识别)",
        "3. 颜色和材质",
        "4. 建议的关键词标签",
    ]),
    "缺陷检测": "\n".join([
        "请仔细检查这张产品图片,识别是否存在以下缺陷:",
        "- 划痕",
        "- 变形",
        "- 颜色不均",
        "- 缺失部件",
        "给出检测结果和置信度。",
    ]),
}

# Print a preview (first 60 chars) of each template.
print("=== VLM Prompt 模板 ===")
for task_name, template in VLM_PROMPTS.items():
    print(f"\n[{task_name}]")
    print(f"  {template[:60]}...")

多图理解

"""
多图对比与分析
"""
class MultiImageAnalyzer:
    """Multi-image analyzer.

    Builds multi-image chat payloads (GPT-4o accepts several images in one
    request) and estimates the input-token cost of vision requests.
    """

    @staticmethod
    def build_comparison_message(
        images: list[str],
        task: str = "compare",
    ) -> list[dict]:
        """Build one user message carrying several images.

        Args:
            images: Image URLs to include, in order.
            task: One of the canned tasks ("compare" / "sequence" / "best");
                any other value is used verbatim as the prompt text.

        Returns:
            A one-element ``messages`` list: text part followed by one
            image part per URL.
        """
        prompts = {
            "compare": "请对比这些图片,找出它们的异同。",
            "sequence": "这些图片按顺序展示了一个过程,请描述。",
            "best": "请从这些选项中选出最好的,并解释原因。",
        }
        content: list[dict] = [
            {"type": "text", "text": prompts.get(task, task)},
        ]
        # "low" detail keeps per-image cost at ~85 tokens, which is usually
        # enough for comparison-style tasks. (Previously this loop used
        # enumerate with an unused index.)
        content.extend(
            {"type": "image_url", "image_url": {"url": img_url, "detail": "low"}}
            for img_url in images
        )
        return [{"role": "user", "content": content}]

    @staticmethod
    def estimate_cost(
        num_images: int,
        detail: str = "auto",
    ) -> dict:
        """Estimate input tokens and cost for a vision API request.

        Token counts are rough GPT-4o estimates; an unknown ``detail``
        value falls back to the "auto" average. Cost assumes the GPT-4o
        input price of $2.50 per 1M tokens.

        Returns:
            Dict with image/total token counts and a formatted cost string.
        """
        tokens_per_image = {
            "low": 85,     # fixed low-resolution cost
            "high": 1105,  # worst case at maximum resolution
            "auto": 500,   # rough average estimate
        }
        img_tokens = tokens_per_image.get(detail, 500) * num_images
        text_tokens = 200  # assume a ~200-token text prompt
        total_tokens = img_tokens + text_tokens
        cost = total_tokens / 1_000_000 * 2.50  # GPT-4o input price
        return {
            "images": num_images,
            "detail": detail,
            "image_tokens": img_tokens,
            "total_tokens": total_tokens,
            "estimated_cost": f"${cost:.4f}",
        }
# Demo: how token usage and cost scale with the detail setting (5 images).
for level in ("low", "high", "auto"):
    estimate = MultiImageAnalyzer.estimate_cost(5, level)
    print(f"  5张图 ({level}): {estimate['image_tokens']} tokens, {estimate['estimated_cost']}")

VLM 最佳实践

"""
VLM 使用最佳实践
"""
# Checklist-style best practices for working with VLMs, grouped by topic.
BEST_PRACTICES = {
    "图片预处理": [
        "压缩过大的图片 (>20MB) 到合适大小",
        "旋转/裁剪到正确方向",
        "对于 OCR 任务,提高对比度",
        "多张小图可拼接成一张大图节省成本",
    ],
    "Prompt 技巧": [
        "明确指定输出格式 (JSON/表格/列表)",
        "对复杂图像使用 Chain-of-Thought 引导",
        "区分「看到什么」和「推理什么」",
        "用 detail:low 做初筛, high 做精确分析",
    ],
    "降低幻觉": [
        "要求模型说明置信度",
        "对关键信息做二次验证",
        "提供参考上下文(如商品类别列表)",
        "限制推理范围,只回答能从图中判断的",
    ],
    "成本控制": [
        "简单任务用 detail:low (85 tokens/图)",
        "批量处理用异步并发",
        "缓存常见图片的分析结果",
        "GPT-4o-mini 已支持视觉,成本低 10x",
    ],
}

# Print each topic with its tips as a numbered list.
for heading, advice in BEST_PRACTICES.items():
    print(f"\n{heading}:")
    for rank, tip in enumerate(advice, start=1):
        print(f"  {rank}. {tip}")

本章小结

下一章:图像理解与生成。