Building a Simple Application
Bringing together what we've learned so far, we will build a complete LLM application.
Project Overview
We will build an intelligent question-answering system with the following features:
- Multi-turn conversation
- Knowledge retrieval
- Streaming output
- Web interface
graph TB
A[User] --> B[Streamlit UI]
B --> C[Chat Manager]
C --> D[LLM API]
C --> E[Vector Database]
D --> F[Generate Reply]
E --> F
F --> B
B --> G[User]
style B fill:#e1f5ff
style G fill:#c8e6c9
Project Structure
simple-llm-app/
├── .env                 # Environment variables
├── requirements.txt     # Dependencies
├── app.py               # Main application
├── chat_manager.py      # Conversation management
├── vector_store.py      # Vector store
└── knowledge/           # Knowledge base
    └── docs.md
Step 1: Project Initialization
Create the project directory and install the dependencies:
mkdir simple-llm-app
cd simple-llm-app
# Create requirements.txt
cat > requirements.txt << EOF
streamlit>=1.29.0
langchain>=0.1.0
langchain-openai>=0.0.5
langchain-community>=0.0.15
chromadb>=0.4.0
python-dotenv>=1.0.0
tiktoken>=0.5.0
openai>=1.0.0
EOF
# Install dependencies
pip install -r requirements.txt
# Create the .env file
cat > .env << EOF
OPENAI_API_KEY=your_api_key_here
EOF
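Before going further, it can save debugging time to confirm that the key actually loads. Below is a minimal sanity-check script (check_setup.py is a throwaway helper introduced here, not part of the project structure above):
# check_setup.py - quick sanity check for the API key and connectivity
import os
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is missing from .env"

client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "ping"}],
    max_tokens=5,
)
print("API reachable:", response.choices[0].message.content)
Run it with python check_setup.py; if it prints a reply, the environment is set up correctly.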
Step 2: Chat Manager
Create chat_manager.py:
import os
from typing import List, Dict
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()


class ChatManager:
    """Conversation manager"""

    def __init__(self, system_prompt: str = "You are a helpful assistant."):
        """
        Initialize the conversation manager

        Args:
            system_prompt: System prompt
        """
        self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.system_prompt = system_prompt
        self.messages: List[Dict[str, str]] = [
            {"role": "system", "content": system_prompt}
        ]
        self.max_history = 10  # Keep the 10 most recent conversation rounds

    def add_user_message(self, content: str):
        """Add a user message"""
        self.messages.append({
            "role": "user",
            "content": content
        })
        self._trim_history()

    def add_assistant_message(self, content: str):
        """Add an AI reply"""
        self.messages.append({
            "role": "assistant",
            "content": content
        })
        self._trim_history()

    def _trim_history(self):
        """Keep only the system prompt plus the last max_history rounds"""
        max_messages = self.max_history * 2  # one round = user + assistant
        if len(self.messages) - 1 > max_messages:
            self.messages = [self.messages[0]] + self.messages[-max_messages:]

    def generate_response(
        self,
        stream: bool = False,
        model: str = "gpt-4o-mini",
        temperature: float = 0.7
    ):
        """
        Generate a reply

        Args:
            stream: Whether to stream the output
            model: Model name
            temperature: Temperature parameter

        Returns:
            A generator (if stream=True) or the reply text
        """
        if stream:
            return self._stream_response(model, temperature)
        else:
            return self._normal_response(model, temperature)

    def _normal_response(self, model: str, temperature: float):
        """Regular (non-streaming) reply"""
        response = self.client.chat.completions.create(
            model=model,
            messages=self.messages,
            temperature=temperature
        )
        content = response.choices[0].message.content
        self.add_assistant_message(content)
        return content

    def _stream_response(self, model: str, temperature: float):
        """Streaming reply"""
        stream = self.client.chat.completions.create(
            model=model,
            messages=self.messages,
            temperature=temperature,
            stream=True
        )
        full_content = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                content = chunk.choices[0].delta.content
                full_content += content
                yield content
        # Add the complete reply to the history
        self.add_assistant_message(full_content)

    def clear_history(self):
        """Clear the conversation history"""
        self.messages = [
            {"role": "system", "content": self.system_prompt}
        ]

    def get_history(self) -> List[Dict[str, str]]:
        """Get the conversation history"""
        return self.messages[1:]  # Exclude the system message

    def get_token_count(self) -> int:
        """Count tokens in the current history"""
        import tiktoken
        try:
            encoding = tiktoken.encoding_for_model("gpt-4o-mini")
        except KeyError:
            # Older tiktoken versions don't know gpt-4o-mini; fall back
            encoding = tiktoken.get_encoding("cl100k_base")
        total_tokens = 0
        for msg in self.messages:
            total_tokens += len(encoding.encode(msg["content"]))
        return total_tokens

    def get_cost_estimate(self) -> float:
        """Rough cost estimate"""
        token_count = self.get_token_count()
        # GPT-4o-mini pricing: $0.15/1M input tokens, $0.60/1M output tokens.
        # Simplified: everything is billed at the input rate, so this is a lower bound.
        cost = token_count * 0.15 / 1_000_000
        return cost
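Before building the UI, you can exercise ChatManager from a small script. A minimal sketch (try_chat.py is a scratch file introduced here for illustration):
# try_chat.py - quick manual test of ChatManager
from chat_manager import ChatManager

manager = ChatManager(system_prompt="You are a concise assistant.")

# Non-streaming round
manager.add_user_message("What is the capital of France?")
print(manager.generate_response())

# Streaming round: consume the generator chunk by chunk
manager.add_user_message("And its population?")
for chunk in manager.generate_response(stream=True):
    print(chunk, end="", flush=True)
print()

print(f"Tokens so far: {manager.get_token_count()}")
print(f"Estimated cost: ${manager.get_cost_estimate():.6f}")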
Step 3: Vector Database (Optional)
Create vector_store.py:
from typing import List
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter


class KnowledgeBase:
    """Knowledge base"""

    def __init__(self, persist_directory: str = "./chroma_db"):
        """
        Initialize the knowledge base

        Args:
            persist_directory: Persistence directory
        """
        self.embeddings = OpenAIEmbeddings()
        self.persist_directory = persist_directory
        # Try to load an existing database
        # (Chroma creates an empty one if the directory does not exist)
        try:
            self.vectorstore = Chroma(
                persist_directory=persist_directory,
                embedding_function=self.embeddings
            )
            print("✅ Loaded knowledge base")
        except Exception:
            self.vectorstore = None
            print("ℹ️ No existing knowledge base found")

    def add_documents(self, texts: List[str]):
        """
        Add documents to the knowledge base

        Args:
            texts: List of texts
        """
        # Split texts into chunks
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        chunks = []
        for text in texts:
            chunks.extend(text_splitter.split_text(text))
        # Create or extend the vector database
        if self.vectorstore is None:
            self.vectorstore = Chroma.from_texts(
                texts=chunks,
                embedding=self.embeddings,
                persist_directory=self.persist_directory
            )
        else:
            self.vectorstore.add_texts(chunks)
        # chromadb >= 0.4 persists to disk automatically,
        # so no explicit persist() call is needed
        print(f"✅ Added {len(chunks)} document chunks")

    def search(self, query: str, k: int = 3) -> List[str]:
        """
        Search for relevant documents

        Args:
            query: Query
            k: Number of results to return

        Returns:
            List of relevant documents
        """
        if self.vectorstore is None:
            return []
        results = self.vectorstore.similarity_search(query, k=k)
        return [doc.page_content for doc in results]

    def search_with_scores(self, query: str, k: int = 3) -> List[tuple]:
        """
        Search and return scores

        Args:
            query: Query
            k: Number of results to return

        Returns:
            List of (document, score) tuples
        """
        if self.vectorstore is None:
            return []
        results = self.vectorstore.similarity_search_with_score(query, k=k)
        return [(doc.page_content, score) for doc, score in results]
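Likewise, the knowledge base can be tested standalone. A minimal sketch (try_kb.py and the sample texts are placeholders introduced here):
# try_kb.py - quick manual test of KnowledgeBase
from dotenv import load_dotenv
load_dotenv()  # OpenAIEmbeddings needs OPENAI_API_KEY

from vector_store import KnowledgeBase

kb = KnowledgeBase(persist_directory="./chroma_db")
kb.add_documents([
    "Streamlit is a Python framework for building data apps.",
    "Chroma is an open-source embedding database."
])

# Plain search
for doc in kb.search("What is Streamlit?", k=2):
    print(doc)

# Search with scores; in Chroma the score is a distance,
# so lower usually means more similar
for doc, score in kb.search_with_scores("embedding database", k=2):
    print(f"{score:.3f}  {doc[:60]}")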
Step 4: Streamlit Interface
Create app.py:
import streamlit as st
from chat_manager import ChatManager
from vector_store import KnowledgeBase

# Page configuration
st.set_page_config(
    page_title="Smart Q&A System",
    page_icon="🤖",
    layout="wide"
)

# Initialize session state
if "messages" not in st.session_state:
    st.session_state.messages = []
if "chat_manager" not in st.session_state:
    st.session_state.chat_manager = ChatManager(
        system_prompt="You are a helpful AI assistant. Answer questions concisely and accurately."
    )
if "knowledge_base" not in st.session_state:
    st.session_state.knowledge_base = KnowledgeBase()

# Sidebar
with st.sidebar:
    st.title("⚙️ Settings")
    # Model selection
    model = st.selectbox(
        "Model",
        ["gpt-4o-mini", "gpt-4o", "gpt-4-turbo"],
        index=0
    )
    # Temperature
    temperature = st.slider(
        "Temperature (creativity)",
        min_value=0.0,
        max_value=2.0,
        value=0.7,
        step=0.1
    )
    # Streaming output
    use_stream = st.checkbox("Stream output", value=True)
    st.divider()
    # Knowledge base management
    st.subheader("📚 Knowledge Base")
    uploaded_files = st.file_uploader(
        "Upload documents",
        type=["txt", "md"],
        accept_multiple_files=True
    )
    if uploaded_files:
        for file in uploaded_files:
            text = file.read().decode("utf-8")
            st.session_state.knowledge_base.add_documents([text])
    # Search
    query = st.text_input("Search the knowledge base")
    if query:
        results = st.session_state.knowledge_base.search(query, k=3)
        if results:
            st.write("🔍 Search results:")
            for i, result in enumerate(results, 1):
                st.write(f"{i}. {result[:100]}...")
    st.divider()
    # Statistics
    st.subheader("📊 Statistics")
    token_count = st.session_state.chat_manager.get_token_count()
    cost = st.session_state.chat_manager.get_cost_estimate()
    st.metric("Conversation rounds", len(st.session_state.messages) // 2)
    st.metric("Total tokens", f"{token_count:,}")
    st.metric("Estimated cost", f"${cost:.4f}")
    # Clear button
    if st.button("🗑️ Clear conversation"):
        st.session_state.messages = []
        st.session_state.chat_manager.clear_history()
        st.rerun()

# Main interface
st.title("🤖 Smart Q&A System")
st.markdown("---")

# Display the conversation history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# User input
if prompt := st.chat_input("Type your question..."):
    # Show the user message
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add it to the display history
    st.session_state.messages.append({
        "role": "user",
        "content": prompt
    })
    # Retrieve from the knowledge base
    relevant_docs = st.session_state.knowledge_base.search(prompt, k=2)
    # Build an augmented prompt
    if relevant_docs:
        context = "\n\n".join(relevant_docs)
        enhanced_prompt = f"""
Reference information:
{context}

Question: {prompt}

Answer the question based on the reference information above. If the reference information is insufficient, answer from your own knowledge.
"""
        st.session_state.chat_manager.add_user_message(enhanced_prompt)
    else:
        st.session_state.chat_manager.add_user_message(prompt)
    # Generate a reply
    with st.chat_message("assistant"):
        if use_stream:
            # Streaming output
            full_response = ""
            message_placeholder = st.empty()
            for chunk in st.session_state.chat_manager.generate_response(
                stream=True,
                model=model,
                temperature=temperature
            ):
                full_response += chunk
                message_placeholder.markdown(full_response)
            # Add to the display history
            st.session_state.messages.append({
                "role": "assistant",
                "content": full_response
            })
        else:
            # Regular output
            with st.spinner("Thinking..."):
                response = st.session_state.chat_manager.generate_response(
                    stream=False,
                    model=model,
                    temperature=temperature
                )
                st.markdown(response)
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": response
                })

# Footer
st.markdown("---")
st.markdown(
    """
    <div style='text-align: center; color: gray; font-size: 0.8em;'>
    Powered by OpenAI GPT-4o | Made with ❤️ and LangChain
    </div>
    """,
    unsafe_allow_html=True
)
Step 5: Run the Application
# Make sure you are in the simple-llm-app directory
cd simple-llm-app
# Run the app
streamlit run app.py
The app will open at http://localhost:8501.
Feature Walkthrough
Conversation
- Type a question into the input box
- The AI replies in real time
- Multi-turn conversations are supported; the AI remembers context
Knowledge Base
- Upload documents in the sidebar
- Documents are automatically indexed into the vector database
- During Q&A, relevant documents are retrieved and used as references
Advanced Features
- Switch between models
- Adjust the temperature parameter
- View token usage and cost
- Clear the conversation history
Extensions
Adding Code Highlighting
# Add to app.py (requires: pip install pygments)
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter

def highlight_code(code, language):
    """Highlight code as an HTML fragment"""
    try:
        lexer = get_lexer_by_name(language)
        formatter = HtmlFormatter(style='friendly')
        return pygments.highlight(code, lexer, formatter)
    except Exception:
        # e.g. unknown language: fall back to the raw code
        return code
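highlight_code returns an HTML fragment, so displaying it in Streamlit also requires the Pygments CSS and unsafe_allow_html. A minimal sketch of the wiring (assumes highlight_code above):
# Inject the Pygments stylesheet once, then render highlighted fragments
from pygments.formatters import HtmlFormatter

css = HtmlFormatter(style='friendly').get_style_defs('.highlight')
st.markdown(f"<style>{css}</style>", unsafe_allow_html=True)
st.markdown(highlight_code("print('hello')", "python"), unsafe_allow_html=True)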
Adding Export
# Add an export button
import json
from datetime import datetime

def export_chat():
    """Export the conversation history as JSON"""
    data = {
        "timestamp": str(datetime.now()),
        "messages": st.session_state.messages
    }
    return json.dumps(data, ensure_ascii=False, indent=2)

if st.button("📥 Export conversation"):
    st.download_button(
        "Download",
        export_chat(),
        "chat_history.json",
        "application/json"
    )
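If you prefer a human-readable transcript, the same pattern works for Markdown. A sketch (export_chat_markdown is a helper name introduced here; it reuses the datetime import above):
def export_chat_markdown() -> str:
    """Export the conversation history as Markdown"""
    lines = [f"# Chat export ({datetime.now():%Y-%m-%d %H:%M})", ""]
    for msg in st.session_state.messages:
        role = "**User**" if msg["role"] == "user" else "**Assistant**"
        lines.append(f"{role}: {msg['content']}")
        lines.append("")
    return "\n".join(lines)

if st.button("📄 Export as Markdown"):
    st.download_button(
        "Download",
        export_chat_markdown(),
        "chat_history.md",
        "text/markdown"
    )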
Key Takeaways
✅ Integrating an LLM API into a web application
✅ Building a UI quickly with Streamlit
✅ Managing multi-turn conversations
✅ Augmenting answers with a vector database
✅ Improving user experience with streaming output
Next: build a complete RAG application 📖