Initialize and Experiment with Memory Module
Here we initialize a raw memory module and demonstrate its functionality: putting and retrieving ChatMessage objects.

- Note that retriever_kwargs takes the same arguments you would specify on VectorIndexRetriever or index.as_retriever(..).
In [ ]
from llama_index.core.memory import VectorMemory
from llama_index.embeddings.openai import OpenAIEmbedding
vector_memory = VectorMemory.from_defaults(
    vector_store=None,  # leave as None to use default in-memory vector store
    embed_model=OpenAIEmbedding(),
    retriever_kwargs={"similarity_top_k": 1},
)
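As a side note, here is a minimal sketch of the equivalence mentioned above: the similarity_top_k key in retriever_kwargs is the same argument you would pass to index.as_retriever(...) on a standalone index. The tiny document and index below are hypothetical and used only for illustration.

from llama_index.core import Document, VectorStoreIndex

# hypothetical throwaway index, only to show that retriever_kwargs
# mirrors the arguments accepted by index.as_retriever(...)
index = VectorStoreIndex.from_documents([Document(text="Jerry likes juice.")])
retriever = index.as_retriever(similarity_top_k=1)  # same key as in retriever_kwargs
nodes = retriever.retrieve("What does Jerry like?")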
In [ ]
from llama_index.core.llms import ChatMessage
msgs = [
    ChatMessage.from_str("Jerry likes juice.", "user"),
    ChatMessage.from_str("Bob likes burgers.", "user"),
    ChatMessage.from_str("Alice likes apples.", "user"),
]
In [ ]
# load into memory
for m in msgs:
    vector_memory.put(m)
In [ ]
# retrieve from memory
msgs = vector_memory.get("What does Jerry like?")
msgs
Out [ ]
[ChatMessage(role=<MessageRole.USER: 'user'>, content='Jerry likes juice.', additional_kwargs={})]
In [ ]
vector_memory.reset()
Now let's reset and try again. This time, we'll add an assistant message. Note that user/assistant messages are bundled together by default.
In [ ]
msgs = [
    ChatMessage.from_str("Jerry likes burgers.", "user"),
    ChatMessage.from_str("Bob likes apples.", "user"),
    ChatMessage.from_str("Indeed, Bob likes apples.", "assistant"),
    ChatMessage.from_str("Alice likes juice.", "user"),
]
vector_memory.set(msgs)
In [ ]
msgs = vector_memory.get("What does Bob like?")
msgs
Out [ ]
[ChatMessage(role=<MessageRole.USER: 'user'>, content='Bob likes apples.', additional_kwargs={}), ChatMessage(role=<MessageRole.ASSISTANT: 'assistant'>, content='Indeed, Bob likes apples.', additional_kwargs={})]
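As a further check on the bundling behavior, querying about Alice should return only her single user message, since no assistant reply followed it. This is a hypothetical extra query, not part of the original notebook, and assumes the top-1 match is Alice's message:

# hypothetical extra query, not part of the original notebook
msgs = vector_memory.get("What does Alice like?")
msgs
# likely returns only the user message "Alice likes juice.",
# with no bundled assistant message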