多模态 Embedding 与向量检索
High Contrast
Dark Mode
Light Mode
Sepia
Forest
2 min read · 305 words

多模态 Embedding 与向量检索

文本 Embedding 已经被广泛应用,而多模态 Embedding 把图像、音频、视频也映射到同一语义向量空间,从而实现跨模态检索——"用一句话搜索图片"或"用图片搜索相关文档"。

多模态向量空间架构

graph TB
    A[多模态输入] --> B[文本 Encoder<br>CLIP Text]
    A --> C[图像 Encoder<br>CLIP Vision]
    A --> D[音频 Encoder<br>AudioCLIP]
    B --> E[联合语义空间<br>Joint Embedding Space]
    C --> E
    D --> E
    E --> F[向量数据库<br>Vector DB]
    F --> G[跨模态检索<br>Cross-modal Search]
    style E fill:#ede7f6,stroke:#5e35b1,stroke-width:2px
    style F fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
    style G fill:#c8e6c9,stroke:#43a047,stroke-width:2px

CLIP 多模态 Embedding 实现

from dataclasses import dataclass
from pathlib import Path
from typing import Union
import numpy as np
@dataclass
class EmbeddingRecord:
    """A single multimodal embedding entry (text, image, or audio)."""

    id: str
    modality: str           # "text" / "image" / "audio"
    source: str             # description of the original content, or its path
    embedding: list[float]
    metadata: dict

    @property
    def vector(self) -> np.ndarray:
        """The embedding as a float32 numpy vector."""
        return np.asarray(self.embedding, dtype=np.float32)

    def cosine_similarity(self, other: "EmbeddingRecord") -> float:
        """Cosine similarity with another record (norms epsilon-guarded against zero vectors)."""
        u = self.vector
        v = other.vector
        u = u / (np.linalg.norm(u) + 1e-8)
        v = v / (np.linalg.norm(v) + 1e-8)
        return float(u @ v)
class MultimodalEmbedder:
"""
多模态 Embedding 生成器(CLIP 封装)
依赖: pip install transformers torch pillow
"""
def __init__(self, model_name: str = "openai/clip-vit-base-patch32"):
self.model_name = model_name
self._model = None
self._processor = None
def _load_model(self):
"""延迟加载模型"""
if self._model is None:
from transformers import CLIPModel, CLIPProcessor
self._processor = CLIPProcessor.from_pretrained(self.model_name)
self._model = CLIPModel.from_pretrained(self.model_name)
def embed_text(self, text: str, record_id: str = "") -> EmbeddingRecord:
"""生成文本 Embedding"""
self._load_model()
import torch
inputs = self._processor(text=[text], return_tensors="pt", padding=True)
with torch.no_grad():
features = self._model.get_text_features(**inputs)
embedding = features[0].numpy().tolist()
return EmbeddingRecord(
id=record_id or f"text_{hash(text)}",
modality="text",
source=text[:100],
embedding=embedding,
metadata={"length": len(text)},
)
def embed_image(
self, image_path: Union[str, Path], record_id: str = ""
) -> EmbeddingRecord:
"""生成图像 Embedding"""
self._load_model()
from PIL import Image
import torch
image = Image.open(image_path).convert("RGB")
inputs = self._processor(images=image, return_tensors="pt")
with torch.no_grad():
features = self._model.get_image_features(**inputs)
embedding = features[0].numpy().tolist()
return EmbeddingRecord(
id=record_id or f"img_{Path(image_path).stem}",
modality="image",
source=str(image_path),
embedding=embedding,
metadata={"width": image.width, "height": image.height},
)
def batch_embed_texts(self, texts: list[str]) -> list[EmbeddingRecord]:
"""批量生成文本 Embedding"""
return [self.embed_text(t, f"text_{i}") for i, t in enumerate(texts)]
class MultimodalVectorStore:
    """Lightweight in-memory multimodal vector index.

    For production workloads, prefer a dedicated engine such as
    Milvus or Weaviate.
    """

    def __init__(self):
        self.records: list[EmbeddingRecord] = []

    def add(self, record: EmbeddingRecord) -> None:
        """Index a single embedding record."""
        self.records.append(record)

    def search(
        self,
        query: EmbeddingRecord,
        top_k: int = 5,
        modality_filter: str | None = None,
    ) -> list[tuple[EmbeddingRecord, float]]:
        """Return the ``top_k`` records most similar to ``query``.

        The query record itself is excluded; an optional modality filter
        restricts the candidate pool. Results are (record, score) pairs
        sorted by descending cosine similarity.
        """
        pool = (
            [r for r in self.records if r.modality == modality_filter]
            if modality_filter
            else self.records
        )
        ranked = sorted(
            ((r, query.cosine_similarity(r)) for r in pool if r.id != query.id),
            key=lambda pair: pair[1],
            reverse=True,
        )
        return ranked[:top_k]

    def cross_modal_search(
        self,
        text_query: EmbeddingRecord,
        target_modality: str = "image",
        top_k: int = 5,
    ) -> list[tuple[EmbeddingRecord, float]]:
        """Cross-modal retrieval: e.g. find images matching a text query."""
        return self.search(text_query, top_k, modality_filter=target_modality)
# Usage example (CLIP cross-modal retrieval)
store = MultimodalVectorStore()

# Simulated pre-indexed data (a real pipeline would call embedder.embed_image)
mock_embeddings = np.random.randn(5, 512).astype(np.float32)
for idx, vec in enumerate(mock_embeddings):
    category = "electronics" if idx % 2 == 0 else "clothing"
    store.add(EmbeddingRecord(
        id=f"product_{idx}",
        modality="image",
        source=f"products/item_{idx}.jpg",
        embedding=vec.tolist(),
        metadata={"category": category},
    ))

print(f"向量库已索引 {len(store.records)} 条图像")
print("跨模态检索示例: 用文本 '红色连衣裙' 查找匹配图像...")
print("(真实环境中将文本 Embedding 传入 cross_modal_search)")

多模态向量数据库选型

数据库 多模态支持 规模 托管服务 特色
Milvus ✅ 亿级 Zilliz GPU 索引 / 高并发
Weaviate ✅ 亿级 Weaviate Cloud 自带 CLIP 集成
Qdrant ✅ 亿级 Qdrant Cloud Rust 实现 / 低延迟
Pinecone ✅ 亿级 纯托管 简单易用
ChromaDB 基础 百万级 开源 本地开发首选

本章小结

下一章:图像理解与生成