OpenCode CLI 配置与多模型对比
OpenCode 是一个开源的 AI 编程助手 CLI,支持同时接入 Claude、Gemini、GPT-4——让你在一个工具里切换和对比多个模型。
OpenCode 在工具链中的位置
graph TD
DEV[开发者] --> OC[OpenCode CLI]
OC --> M1[Claude Sonnet/Opus]
OC --> M2[Gemini 2.0 Flash]
OC --> M3[GPT-4o]
OC --> M4[本地模型 Ollama]
OC --> FEAT[核心功能]
FEAT --> F1[TUI 交互界面]
FEAT --> F2[会话管理]
FEAT --> F3[代码执行沙箱]
FEAT --> F4[文件上下文]
FEAT --> F5[多模型对比]
M1 -->|最佳代码理解| RESULT[任务完成]
M2 -->|最大上下文| RESULT
M3 -->|广泛集成| RESULT
M4 -->|离线/隐私| RESULT
style OC fill:#c8e6c9,stroke:#388e3c,stroke-width:2px
style RESULT fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
安装与多模型配置
# 安装 OpenCode(Go 二进制,无依赖)
# macOS
brew install opencode-ai/tap/opencode
# Linux
curl -fsSL https://opencode.ai/install.sh | sh
# 或从 npm 安装
npm install -g opencode-ai
# 验证
opencode --version
"""
OpenCode CLI 配置与多模型工作流演示
展示如何在项目中设置和切换 AI 模型
"""
import json
import os
from dataclasses import dataclass, field
from enum import Enum
class ModelProvider(Enum):
    """Backends that OpenCode can route requests to.

    The string values match the provider keys used in the ``providers``
    mapping of an ``opencode.json`` (see OpenCodeWorkflows.CONFIG_EXAMPLE).
    """
    ANTHROPIC = "anthropic"
    GOOGLE = "google"
    OPENAI = "openai"
    OLLAMA = "ollama"  # local models — no API key required (env_var is "" for these)
    GROQ = "groq"
@dataclass
class ModelConfig:
    """Static metadata for one model selectable through OpenCode.

    Consumed by ModelBenchmark to render the comparison matrix and
    to back task-based recommendations.
    """
    provider: ModelProvider        # backend that serves this model
    model_id: str                  # provider-specific model identifier
    display_name: str              # human-readable name shown in tables
    context_window: int            # context size, in tokens
    cost_per_1m_input: float       # USD per 1M input tokens; 0.0 means free/local
    cost_per_1m_output: float      # USD per 1M output tokens
    strengths: list[str]           # qualitative strengths; index 0 is printed as the primary one
    weaknesses: list[str]          # known limitations
    env_var: str                   # name of the API-key environment variable ("" if none needed)
@dataclass
class OpenCodeConfig:
    """In-memory model of a project-level ``opencode.json`` file."""

    model: str = "claude-sonnet-4-5-20251001"        # default model id
    providers: dict = field(default_factory=dict)    # provider name -> settings (e.g. apiKey)
    instructions: str = ""                           # project system prompt (like CLAUDE.md / GEMINI.md)
    autoshare: bool = False                          # auto-share sessions when True
    keybindings: dict = field(default_factory=dict)  # custom key bindings

    def to_json(self) -> str:
        """Render the config as pretty-printed JSON.

        NOTE(review): the JSON key is singular "provider" while the field is
        ``providers`` — looks like it mirrors the opencode.json schema; confirm.
        ``keybindings`` is deliberately left out of the serialized form.
        """
        payload = {
            "model": self.model,
            "provider": self.providers,
            "instructions": self.instructions,
            "autoshare": self.autoshare,
        }
        return json.dumps(payload, indent=2, ensure_ascii=False)
class ModelBenchmark:
    """Comparison data and task-based recommendations for the supported models."""

    # Catalogue of models reachable through OpenCode, with pricing and notes.
    MODELS = [
        ModelConfig(
            provider=ModelProvider.ANTHROPIC,
            model_id="claude-sonnet-4-6",
            display_name="Claude Sonnet 4.6",
            context_window=200_000,
            cost_per_1m_input=3.0,
            cost_per_1m_output=15.0,
            strengths=["代码生成质量最高", "指令遵循准确", "长文档理解", "安全拒绝边缘请求"],
            weaknesses=["无实时网络搜索", "知识截止日期"],
            env_var="ANTHROPIC_API_KEY",
        ),
        ModelConfig(
            provider=ModelProvider.GOOGLE,
            model_id="gemini-2.0-flash",
            display_name="Gemini 2.0 Flash",
            context_window=1_000_000,
            cost_per_1m_input=0.075,
            cost_per_1m_output=0.30,
            strengths=["超大上下文(100万token)", "速度最快", "Google Search 集成", "多模态"],
            weaknesses=["代码复杂逻辑偶有偏差", "指令遵循不如 Claude"],
            env_var="GEMINI_API_KEY",
        ),
        ModelConfig(
            provider=ModelProvider.OPENAI,
            model_id="gpt-4o",
            display_name="GPT-4o",
            context_window=128_000,
            cost_per_1m_input=2.50,
            cost_per_1m_output=10.0,
            strengths=["生态最成熟", "插件/工具支持广", "代码 Interpreter"],
            weaknesses=["代码生成不如 Claude", "价格较高"],
            env_var="OPENAI_API_KEY",
        ),
        ModelConfig(
            provider=ModelProvider.OLLAMA,
            model_id="codellama:13b",
            display_name="CodeLlama 13B (本地)",
            context_window=16_000,
            cost_per_1m_input=0.0,
            cost_per_1m_output=0.0,
            strengths=["完全离线", "数据不出本机", "无 API 费用"],
            weaknesses=["能力远低于云端模型", "需要本地 GPU", "上下文窗口小"],
            env_var="",
        ),
    ]

    # Task category -> recommended model. Keys may hold several keywords
    # separated by "/"; recommend() matches any of them against the task text.
    TASK_RECOMMENDATIONS = {
        "日常代码生成": "Claude Sonnet 4.6",
        "超大代码库分析(>50万字符)": "Gemini 2.0 Flash",
        "快速原型/草稿": "Gemini 2.0 Flash(免费快速)",
        "需要实时信息": "Gemini 2.0 Flash(Google Search)",
        "高保密性代码": "CodeLlama 本地运行",
        "OpenAI API 兼容生态": "GPT-4o",
        "成本最优": "Gemini Flash(最低价)",
        "质量最优": "Claude Opus 4.6",
    }

    @classmethod
    def compare_models(cls) -> None:
        """Print a comparison matrix (context size, input price, top strength)."""
        print("=== 模型对比矩阵 ===\n")
        print(f"{'模型':<25} {'上下文':<12} {'输入价/1M':<12} {'主要优势'}")
        print("-" * 80)
        for cfg in cls.MODELS:
            # Abbreviate the context window: "<n>K" below one million tokens, else "1M".
            if cfg.context_window < 1_000_000:
                ctx_label = f"{cfg.context_window // 1000}K"
            else:
                ctx_label = "1M"
            price_label = "免费" if cfg.cost_per_1m_input <= 0 else f"${cfg.cost_per_1m_input}"
            print(f"{cfg.display_name:<25} {ctx_label:<12} {price_label:<12} {cfg.strengths[0]}")

    @classmethod
    def recommend(cls, task: str) -> str:
        """Return a model recommendation for *task*.

        The first TASK_RECOMMENDATIONS entry whose "/"-separated keywords
        include a substring of *task* wins; otherwise a general default.
        """
        for label, model_name in cls.TASK_RECOMMENDATIONS.items():
            keywords = label.split("/")
            if any(keyword in task for keyword in keywords):
                return f"推荐: {model_name}({label})"
        return "推荐: Claude Sonnet 4.6(通用最优)"
class OpenCodeWorkflows:
    """Reference material for typical OpenCode CLI workflows."""

    # Sample project config; API keys are env-var placeholders, never literals.
    CONFIG_EXAMPLE = OpenCodeConfig(
        model="claude-sonnet-4-5-20251001",
        providers={
            "anthropic": {"apiKey": "${ANTHROPIC_API_KEY}"},
            "google": {"apiKey": "${GEMINI_API_KEY}"},
            "openai": {"apiKey": "${OPENAI_API_KEY}"},
        },
        instructions="""你是这个 FastAPI 项目的开发助手。
- 使用 Python 3.12+ 特性
- 遵循 PEP 8 代码规范
- 所有函数需要类型注解
- 数据库操作使用异步 SQLAlchemy""",
    )

    # Cheat sheet: description -> shell (or in-TUI slash) command.
    SHELL_COMMANDS = {
        "进入交互模式": "opencode",
        "快速提问": 'opencode -m "如何优化这个 SQL 查询?" < query.sql',
        "切换模型": "/model gemini-2.0-flash",  # slash command inside the interactive TUI
        "查看当前模型": "/model",
        "分析文件": 'opencode -m "审查这段代码的安全问题" < api.py',
        "对比两个模型": "opencode --compare claude,gemini -m '实现一个 LRU 缓存'",
        "查看费用统计": "opencode stats",
        "清除会话": "/clear",
    }

    @classmethod
    def print_config(cls) -> None:
        """Print the example opencode.json rendered as JSON."""
        print("=== opencode.json 配置示例 ===\n")
        print(cls.CONFIG_EXAMPLE.to_json())

    @classmethod
    def print_commands(cls) -> None:
        """Print the command cheat sheet, one description/command pair per entry."""
        print("\n=== 常用命令速查 ===\n")
        for label, command in cls.SHELL_COMMANDS.items():
            print(f" {label}:")
            print(f" $ {command}\n")
# Demo: model comparison, task recommendations, then config and command reference.
print("=== OpenCode CLI — 多模型开发工作流 ===\n")
ModelBenchmark.compare_models()

print("\n=== 任务 → 模型推荐 ===\n")
for demo_task in ("超大代码库分析", "日常代码生成", "需要实时信息", "高保密性代码"):
    print(f" {demo_task}: {ModelBenchmark.recommend(demo_task)}")

OpenCodeWorkflows.print_config()
OpenCodeWorkflows.print_commands()
模型切换决策树
| 条件 | 使用模型 |
|---|---|
| 代码 > 50 万字符 | Gemini 2.0 Flash(1M 上下文) |
| 需要 Google 搜索 | Gemini CLI |
| 日常编码/调试 | Claude Sonnet 4.6 |
| 月预算 < $10 | Gemini Flash(免费层) |
| 数据不能出境 | 本地 Ollama |
| 复杂推理任务 | Claude Opus 4.6 |
行动清单
- [ ] 安装 OpenCode:`brew install opencode-ai/tap/opencode` 或 `npm install -g opencode-ai`
- [ ] 在项目根目录创建 `opencode.json`,配置 Anthropic + Google 双 API Key
- [ ] 运行 `opencode` 进入 TUI,用 `/model` 命令切换模型体验差异
- [ ] 对比测试:同一个重构任务分别问 Claude 和 Gemini,对比回答质量
- [ ] 设置 `instructions` 字段(相当于项目 System Prompt),记录代码规范
- [ ] 用 `opencode stats` 查看本月各模型用量和费用,优化模型选择策略
下一节:03-双引擎策略-分工协作 — Claude + Gemini 互补使用的系统化策略。