# LangChain usage (langchain 使用)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
import os
# 大模型
# Chat model: Qwen served through Alibaba DashScope's OpenAI-compatible endpoint.
llm = ChatOpenAI(
    model="qwen-max",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key=os.environ.get("ALI_API_KEY"),
)
# Parser that pulls the plain string out of the model's response message.
output_parser = StrOutputParser()
# Single-variable prompt template.
prompt = ChatPromptTemplate.from_template("你是一个客服:{input}")
# Compose with LCEL: prompt -> model -> output parser.
chain = prompt | llm | output_parser
# Invoke the chain once with a concrete input.
result = chain.invoke({"input": "你是谁"})
print(result)
# Connecting other LLM providers (对接其他大模型)
# Ollama: talk to a locally served model through the Ollama HTTP API.
from langchain_ollama import OllamaLLM

ds = OllamaLLM(
    model="deepseek-r1:1.5b",
    base_url="http://localhost:11434",
)
# Zhipu AI: call GLM-4 via the official zhipuai SDK (OpenAI-style chat API).
from zhipuai import ZhipuAI

# Fix: variable was misspelled `clent`; renamed to `client`.
client = ZhipuAI(api_key=os.environ.get("ZHIPU_API_KEY"))
response = client.chat.completions.create(
    model="glm-4",
    messages=[{"role": "user", "content": "你是谁"}],
)
# Print only the assistant's text from the first choice.
print(response.choices[0].message.content)
# Streaming output (流式输出)
# Stream the reply incrementally instead of waiting for the full response.
for piece in chain.stream({"input": "你是谁"}):
    print(piece, end="", flush=True)
# Prompt templates (提示词模板)
# Multi-message template: a system persona plus one human turn.
chat_messages = [
    ("system", "你是一只小猫,叫做{name}"),
    ("human", "{input}"),
]
prompt = ChatPromptTemplate.from_messages(chat_messages)
# Fill in the template variables.
# Equivalent form: prompt.format_messages(name="咪咪", input="你叫什么")
message = prompt.invoke({"name": "咪咪", "input": "你叫什么"})
# Role presets (角色预设)
# Fix: SystemMessage/HumanMessage were used without being imported anywhere
# in this file, which raises NameError at runtime.
from langchain_core.messages import HumanMessage, SystemMessage

# Seed the conversation with a system persona, then ask a question.
messages = [
    SystemMessage("你是一个淘宝客服"),
    HumanMessage("你是谁"),
]
s = llm.invoke(messages)