LangChain - Conversation Memory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_openai import ChatOpenAI
import os
# The chat model
llm = ChatOpenAI(
    api_key=os.environ.get("ALI_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model="qwen-max"
)
# In-memory storage
history = InMemoryChatMessageHistory()
# Round 1
history.add_user_message("My name is Zhang San")
resp1 = llm.invoke(history.messages)
print(resp1.content)
history.add_message(resp1)
# Round 2
history.add_user_message("Who am I?")
resp2 = llm.invoke(history.messages)
print(resp2.content)
history.add_message(resp2)
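Both rounds repeat the same pattern: append the user message, call the model with the full history, then append the reply so the next turn can see it. A minimal sketch that wraps this loop in a helper (the name ask is just an illustration), reusing the llm and history defined above:

def ask(question: str) -> str:
    # Append the user turn, invoke the model with everything so far,
    # then store the AI reply for the following turns.
    history.add_user_message(question)
    reply = llm.invoke(history.messages)
    history.add_message(reply)
    return reply.content

print(ask("My name is Zhang San"))
print(ask("Who am I?"))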
Redis-based storage
Note: this requires Redis Stack.
from langchain_redis import RedisChatMessageHistory
history = RedisChatMessageHistory(session_id="test", redis_url="redis://localhost:6379")
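RedisChatMessageHistory exposes the same add/read interface as the in-memory store, so the two-round flow above carries over unchanged. A minimal sketch that reuses the llm defined earlier and assumes a local Redis Stack instance is running (for example via docker run -p 6379:6379 redis/redis-stack-server):

# Messages are persisted in Redis under the given session_id,
# so the conversation survives a process restart.
history.add_user_message("My name is Zhang San")
resp = llm.invoke(history.messages)
print(resp.content)
history.add_message(resp)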
Combining with LCEL
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
import os
from langchain_core.runnables.history import RunnableWithMessageHistory
# The chat model
llm = ChatOpenAI(
    api_key=os.environ.get("ALI_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model="qwen-max"
)
# String output parser
output_parser = StrOutputParser()
# Chat history store
history = InMemoryChatMessageHistory()
# InMemoryChatMessageHistory is not a Runnable subclass, so it cannot be composed directly with |; wrap it with RunnableWithMessageHistory instead
runnable = RunnableWithMessageHistory(
    llm,
    get_session_history=lambda: history  # anonymous function that always returns the same history; since it takes no arguments, no session_id is required in the config
)
chain = runnable | output_parser
# Round 1
print(chain.invoke("I am Zhang San"))
# Round 2
print(chain.invoke("Who am I?"))