Recency Filtering¶
Showcase the capabilities of recency-weighted node postprocessors.
In [ ]
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
In [ ]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import (
FixedRecencyPostprocessor,
EmbeddingRecencyPostprocessor,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
Parse Documents into Nodes, add to Docstore¶
In this example, there are 3 different versions of PG's essay. They are largely identical, except for one specific section that details the amount of funding they raised for Viaweb.
V1: 50k, V2: 30k, V3: 10k
V1: 2020-01-01, V2: 2020-02-03, V3: 2022-04-12
The idea is to encourage the index to fetch the most recent info (i.e. V3).
In [ ]
# load documents
from llama_index.core import StorageContext
def get_file_metadata(file_name: str):
"""Get file metadata."""
if "v1" in file_name:
return {"date": "2020-01-01"}
elif "v2" in file_name:
return {"date": "2020-02-03"}
elif "v3" in file_name:
return {"date": "2022-04-12"}
else:
raise ValueError("invalid file")
documents = SimpleDirectoryReader(
input_files=[
"test_versioned_data/paul_graham_essay_v1.txt",
"test_versioned_data/paul_graham_essay_v2.txt",
"test_versioned_data/paul_graham_essay_v3.txt",
],
file_metadata=get_file_metadata,
).load_data()
# define settings
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
# use node parser to parse into nodes
nodes = Settings.text_splitter.get_nodes_from_documents(documents)
# add to docstore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
storage_context = StorageContext.from_defaults(docstore=docstore)
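As a quick sanity check (an illustrative addition, not part of the original notebook), you can confirm that each parsed node inherited the "date" metadata from its source document, since both recency postprocessors rely on that key:

# inspect a few nodes to confirm the "date" metadata was propagated
for node in nodes[:3]:
    print(node.node_id, node.metadata["date"])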
In [ ]
print(documents[2].get_text())
Build Index¶
In [ ]
# build index
index = VectorStoreIndex(nodes, storage_context=storage_context)
INFO:llama_index.token_counter.token_counter:> [build_index_from_nodes] Total LLM token usage: 0 tokens
INFO:llama_index.token_counter.token_counter:> [build_index_from_nodes] Total embedding token usage: 84471 tokens
Define Recency Postprocessors¶
In [ ]
node_postprocessor = FixedRecencyPostprocessor()
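By default, FixedRecencyPostprocessor keeps only the single most recent node, reading the timestamp from the "date" metadata key. Both behaviors should be configurable; a minimal sketch, assuming the top_k and date_key parameters of the current llama_index.core.postprocessor API (the variable name is hypothetical):

# hypothetical variant: keep the two most recent nodes instead of one
node_postprocessor_top2 = FixedRecencyPostprocessor(
    top_k=2,  # number of most-recent nodes to keep (default is 1)
    date_key="date",  # metadata key holding the timestamp
)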
In [ ]
node_postprocessor_emb = EmbeddingRecencyPostprocessor()
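Unlike the fixed variant, EmbeddingRecencyPostprocessor discards an older node only when a newer node is semantically near-identical to it by embedding similarity, so distinct older content survives. A sketch of tuning that threshold, assuming the date_key and similarity_cutoff parameters in the current API (the variable name is hypothetical):

# hypothetical variant: treat nodes above the similarity cutoff as
# duplicates, keeping only the most recent of each duplicate group
node_postprocessor_emb_tuned = EmbeddingRecencyPostprocessor(
    date_key="date",
    similarity_cutoff=0.7,  # raise to prune only near-exact duplicates
)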
Query Index¶
In [ ]
# naive query
query_engine = index.as_query_engine(
similarity_top_k=3,
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
INFO:llama_index.token_counter.token_counter:> [query] Total LLM token usage: 1813 tokens
INFO:llama_index.token_counter.token_counter:> [query] Total embedding token usage: 22 tokens
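The display_response helper imported above renders the answer in the notebook:

display_response(response)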
In [ ]
# query using fixed recency node postprocessor
query_engine = index.as_query_engine(
similarity_top_k=3, node_postprocessors=[node_postprocessor]
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
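With the fixed recency postprocessor applied, only the most recent node should reach the LLM. An illustrative check on the source nodes (we expect only the V3 date, 2022-04-12):

# verify that only the most recent version was used
for node_with_score in response.source_nodes:
    print(node_with_score.node.metadata["date"])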
In [ ]
# query using embedding-based node postprocessor
query_engine = index.as_query_engine(
similarity_top_k=3, node_postprocessors=[node_postprocessor_emb]
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
INFO:llama_index.token_counter.token_counter:> [query] Total LLM token usage: 541 tokens
INFO:llama_index.token_counter.token_counter:> [query] Total embedding token usage: 22 tokens
Query Index (Lower-Level Usage)¶
In this example we first get the full set of nodes from a query call, then send them to a node postprocessor, and finally synthesize the response through a summary index.
In [ ]
from llama_index.core import SummaryIndex
In [ ]
query_str = (
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?"
)
query_str = ( "How much did the author raise in seed funding from Idelle's husband" " (Julian) for Viaweb?" )
In [ ]
query_engine = index.as_query_engine(
similarity_top_k=3, response_mode="no_text"
)
init_response = query_engine.query(
query_str,
)
resp_nodes = [n.node for n in init_response.source_nodes]
INFO:llama_index.token_counter.token_counter:> [query] Total LLM token usage: 0 tokens
INFO:llama_index.token_counter.token_counter:> [query] Total embedding token usage: 22 tokens
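Before postprocessing, resp_nodes can contain chunks from all three versions; printing their dates (an illustrative check) makes the effect of the next step visible:

# the raw top-k retrieval may mix versions; the postprocessor below prunes them
for node in resp_nodes:
    print(node.metadata["date"])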
In [ ]
summary_index = SummaryIndex(resp_nodes)
query_engine = summary_index.as_query_engine(
node_postprocessors=[node_postprocessor]
)
response = query_engine.query(query_str)
INFO:llama_index.token_counter.token_counter:> [build_index_from_nodes] Total LLM token usage: 0 tokens
INFO:llama_index.token_counter.token_counter:> [build_index_from_nodes] Total embedding token usage: 0 tokens
INFO:llama_index.token_counter.token_counter:> [query] Total LLM token usage: 541 tokens
INFO:llama_index.token_counter.token_counter:> [query] Total embedding token usage: 0 tokens
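Finally, render the recency-filtered answer with the same display_response helper:

display_response(response)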