In [ ]
已复制!
%pip install llama-index-core
%pip install llama-index-core
设置¶
In [ ]
已复制!
from llama_index.core.memory import ChatSummaryMemoryBuffer

# Create a memory buffer that summarizes: once the stored chat exceeds the
# token limit, older messages are condensed into a running summary instead
# of being dropped.
memory = ChatSummaryMemoryBuffer.from_defaults(
    token_limit=40000,
    # Optionally override the summarization prompt; the default is:
    # summarize_prompt=(
    #     "The following is a conversation between the user and assistant. "
    #     "Write a concise summary about the contents of this conversation."
    # ),
)
from llama_index.core.memory import ChatSummaryMemoryBuffer memory = ChatSummaryMemoryBuffer.from_defaults( token_limit=40000, # 可选:设置摘要提示词,以下是默认提示词: # summarize_prompt=( # "以下是用户与助手之间的对话。请对本次对话内容进行简洁摘要。" # ) )
独立使用¶
In [ ]
已复制!
from llama_index.core.llms import ChatMessage

# Seed the buffer with a short example exchange.
conversation = [
    ChatMessage(role="user", content="Hello, how are you?"),
    ChatMessage(role="assistant", content="I'm doing well, thank you!"),
]

# Insert several messages in one call...
memory.put_messages(conversation)

# ...or insert them one at a time:
# memory.put_message(conversation[0])
from llama_index.core.llms import ChatMessage chat_history = [ ChatMessage(role="user", content="Hello, how are you?"), ChatMessage(role="assistant", content="I'm doing well, thank you!"), ] # 放入消息列表 memory.put_messages(chat_history) # 一次放入一条消息 # memory.put_message(chat_history[0])
In [ ]
已复制!
# Retrieve the most recent messages that fit within the configured token
# limit (older context is carried by the summary, per the buffer above).
history = memory.get()
# 获取符合令牌限制的最后 X 条消息 history = memory.get()
In [ ]
已复制!
# Retrieve every message currently stored in the buffer, ignoring the
# token limit.
all_history = memory.get_all()
# 获取所有消息 all_history = memory.get_all()
In [ ]
已复制!
# Clear all stored messages/state from the memory buffer.
memory.reset()
# 清空记忆 memory.reset()
与 Agent 结合使用¶
您可以在任何 agent 的 .run() 方法中设置记忆。
In [ ]
已复制!
import os

# NOTE: "sk-proj-..." is a placeholder — substitute a real OpenAI API key,
# and never commit real keys to source control.
os.environ["OPENAI_API_KEY"] = "sk-proj-..."
import os os.environ["OPENAI_API_KEY"] = "sk-proj-..."
In [ ]
已复制!
from llama_index.core.agent.workflow import ReActAgent, FunctionAgent
from llama_index.core.workflow import Context
from llama_index.llms.openai import OpenAI
from llama_index.core.memory import ChatSummaryMemoryBuffer

# Fix: the original cell constructed `ChatMemoryBuffer`, which is never
# imported on this page and contradicts its subject — running it raises
# NameError. This page demonstrates ChatSummaryMemoryBuffer, so build that
# (import included so the cell runs standalone).
memory = ChatSummaryMemoryBuffer.from_defaults(token_limit=40000)

agent = FunctionAgent(tools=[], llm=OpenAI(model="gpt-4o-mini"))

# Context object that holds the chat history/state across agent runs.
ctx = Context(agent)
from llama_index.core.agent.workflow import ReActAgent, FunctionAgent from llama_index.core.workflow import Context from llama_index.llms.openai import OpenAI memory = ChatSummaryMemoryBuffer.from_defaults(token_limit=40000) agent = FunctionAgent(tools=[], llm=OpenAI(model="gpt-4o-mini")) # 用于保存聊天历史/状态的上下文 ctx = Context(agent)
In [ ]
已复制!
resp = await agent.run("Hello, how are you?", ctx=ctx, memory=memory)
resp = await agent.run("Hello, how are you?", ctx=ctx, memory=memory)
In [ ]
已复制!
print(memory.get_all())
print(memory.get_all())
[ChatMessage(role=<MessageRole.USER: 'user'>, additional_kwargs={}, blocks=[TextBlock(block_type='text', text='Hello, how are you?')]), ChatMessage(role=<MessageRole.ASSISTANT: 'assistant'>, additional_kwargs={}, blocks=[TextBlock(block_type='text', text="Hello! I'm just a program, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?")])]