In [ ]
已复制!
%pip install llama-index-readers-file
%pip install llama-index-llms-openai
%pip install llama-index-embeddings-openai
%pip install llama-index-readers-file %pip install llama-index-llms-openai %pip install llama-index-embeddings-openai
In [ ]
已复制!
!pip install llama-index
!pip install llama-index
加载数据 + 设置¶
加载 Tesla 数据。
In [ ]
已复制!
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None)
In [ ]
已复制!
!wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm
!wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm
!wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm !wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm
In [ ]
已复制!
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm"))
在这里,我们定义一个完全用于生成合成评估数据集的摄取管道。
In [ ]
已复制!
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.readers.file import FlatReader from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter from llama_index.core.ingestion import IngestionPipeline from pathlib import Path import nest_asyncio nest_asyncio.apply()
In [ ]
已复制!
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) pipeline = IngestionPipeline( documents=docs, transformations=[ HTMLNodeParser.from_defaults(), SentenceSplitter(chunk_size=1024, chunk_overlap=200), OpenAIEmbedding(), ], ) eval_nodes = pipeline.run(documents=docs)
In [ ]
已复制!
# NOTE: run this if the dataset isn't already saved
# Note: we only generate from the first 20 nodes, since the rest are references
# eval_llm = OpenAI(model="gpt-4-1106-preview")
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
# 注意:如果数据集尚未保存,请运行此命令 # 注意:我们仅从前 20 个节点生成,因为其余部分是引用。 # eval_llm = OpenAI(model="gpt-4-1106-preview") eval_llm = OpenAI(model="gpt-3.5-turbo") dataset_generator = DatasetGenerator( eval_nodes[:100], llm=eval_llm, show_progress=True, num_questions_per_chunk=3, )
In [ ]
已复制!
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
In [ ]
已复制!
len(eval_dataset.qr_pairs)
len(eval_dataset.qr_pairs)
Out[ ]
100
In [ ]
已复制!
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
In [ ]
已复制!
# optional
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
# 可选 eval_dataset = QueryResponseDataset.from_json( "data/tesla10k_eval_dataset.json" )
In [ ]
已复制!
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
eval_qs = eval_dataset.questions qr_pairs = eval_dataset.qr_pairs ref_response_strs = [r for (_, r) in qr_pairs]
运行评估¶
In [ ]
已复制!
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, ) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import BatchEvalRunner
In [ ]
已复制!
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm) evaluator_dict = { "correctness": evaluator_c, "semantic_similarity": evaluator_s, } batch_eval_runner = BatchEvalRunner( evaluator_dict, workers=2, show_progress=True )
In [ ]
已复制!
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
# get query engine
nodes = pipeline.run(documents=docs)
# define vector index (top-k = 2)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core import VectorStoreIndex async def run_evals( pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref ): # 获取查询引擎 nodes = pipeline.run(documents=docs) # 定义向量索引 (top-k = 2) vector_index = VectorStoreIndex(nodes) query_engine = vector_index.as_query_engine() pred_responses = get_responses(eval_qs, query_engine, show_progress=True) eval_results = await batch_eval_runner.aevaluate_responses( eval_qs, responses=pred_responses, reference=eval_responses_ref ) return eval_results
1. 尝试不同的句子分割器(重叠)¶
分块策略很重要!在这里,我们尝试使用不同重叠值的句子分割器,以查看其对性能的影响。
`IngestionPipeline` 允许我们简洁地定义用于 RAG 的端到端转换管道,并且我们定义了对应于不同句子分割器配置的变体(同时保持其他步骤固定)。
In [ ]
已复制!
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
# For clarity in the demo, make small splits without overlap
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=600)
html_parser = HTMLNodeParser.from_defaults()
parser_dict = {
"sent_parser_o0": sent_parser_o0,
"sent_parser_o200": sent_parser_o200,
"sent_parser_o500": sent_parser_o500,
}
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter # 为了演示清晰,进行无重叠的小分割 sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0) sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200) sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=600) html_parser = HTMLNodeParser.from_defaults() parser_dict = { "sent_parser_o0": sent_parser_o0, "sent_parser_o200": sent_parser_o200, "sent_parser_o500": sent_parser_o500, }
为每个解析器定义单独的管道。
In [ ]
已复制!
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
# generate a pipeline for each parser
# keep embedding model fixed
pipeline_dict = {}
for k, parser in parser_dict.items():
pipeline = IngestionPipeline(
documents=docs,
transformations=[
html_parser,
parser,
OpenAIEmbedding(),
],
)
pipeline_dict[k] = pipeline
from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.ingestion import IngestionPipeline # 为每个解析器生成一个管道 # 保持嵌入模型固定 pipeline_dict = {} for k, parser in parser_dict.items(): pipeline = IngestionPipeline( documents=docs, transformations=[ html_parser, parser, OpenAIEmbedding(), ], ) pipeline_dict[k] = pipeline
In [ ]
已复制!
eval_results_dict = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict[k] = eval_results
eval_results_dict = {} for k, pipeline in pipeline_dict.items(): eval_results = await run_evals( pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs ) eval_results_dict[k] = eval_results
In [ ]
已复制!
# [tmp] save eval results
import pickle
pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
# [临时] 保存评估结果 import pickle pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
In [ ]
已复制!
eval_results_list = list(eval_results_dict.items())
results_df = get_results_df(
[v for _, v in eval_results_list],
[k for k, _ in eval_results_list],
["correctness", "semantic_similarity"],
)
display(results_df)
eval_results_list = list(eval_results_dict.items()) results_df = get_results_df( [v for _, v in eval_results_list], [k for k, _ in eval_results_list], ["correctness", "semantic_similarity"], ) display(results_df)
名称 | 正确性 | 语义相似度 | |
---|---|---|---|
0 | sent_parser_o0 | 4.310 | 0.972838 |
1 | sent_parser_o200 | 4.335 | 0.978842 |
2 | sent_parser_o500 | 4.270 | 0.971759 |
In [ ]
已复制!
# [optional] persist cache in folders so we can reuse
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
# [可选] 将缓存持久化到文件夹中以便重用 for k, pipeline in pipeline_dict.items(): pipeline.cache.persist(f"./cache/{k}.json")
2. 尝试不同的提取器¶
类似地,元数据提取对于良好的性能非常重要。我们在整体摄取管道中将其作为最后一步进行实验,并定义对应于不同提取器的不同摄取管道变体。
我们定义了要尝试的文档提取器集合。
我们保持解析器固定(HTML 解析器,重叠为 200 的句子分割器)和嵌入模型固定(OpenAIEmbedding)。
In [ ]
已复制!
from llama_index.core.extractors import (
TitleExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
)
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
# generate a pipeline for each extractor
# keep embedding model fixed
extractor_dict = {
# "title": TitleExtractor(),
"summary": SummaryExtractor(in_place=False),
"qa": QuestionsAnsweredExtractor(in_place=False),
"default": None,
}
# these are the parsers that will run beforehand
html_parser = HTMLNodeParser.from_defaults()
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
from llama_index.core.extractors import ( TitleExtractor, QuestionsAnsweredExtractor, SummaryExtractor, ) from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter # 为每个提取器生成一个管道 # 保持嵌入模型固定 extractor_dict = { # "title": TitleExtractor(), "summary": SummaryExtractor(in_place=False), "qa": QuestionsAnsweredExtractor(in_place=False), "default": None, } # 这些是之前将运行的解析器 html_parser = HTMLNodeParser.from_defaults() sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
In [ ]
已复制!
pipeline_dict = {}
html_parser = HTMLNodeParser.from_defaults()
for k, extractor in extractor_dict.items():
if k == "default":
transformations = [
html_parser,
sent_parser_o200,
OpenAIEmbedding(),
]
else:
transformations = [
html_parser,
sent_parser_o200,
extractor,
OpenAIEmbedding(),
]
pipeline = IngestionPipeline(transformations=transformations)
pipeline_dict[k] = pipeline
pipeline_dict = {} html_parser = HTMLNodeParser.from_defaults() for k, extractor in extractor_dict.items(): if k == "default": transformations = [ html_parser, sent_parser_o200, OpenAIEmbedding(), ] else: transformations = [ html_parser, sent_parser_o200, extractor, OpenAIEmbedding(), ] pipeline = IngestionPipeline(transformations=transformations) pipeline_dict[k] = pipeline
In [ ]
已复制!
eval_results_dict_2 = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict_2[k] = eval_results
eval_results_dict_2 = {} for k, pipeline in pipeline_dict.items(): eval_results = await run_evals( pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs ) eval_results_dict_2[k] = eval_results
In [ ]
已复制!
eval_results_list_2 = list(eval_results_dict_2.items())
results_df = get_results_df(
[v for _, v in eval_results_list_2],
[k for k, _ in eval_results_list_2],
["correctness", "semantic_similarity"],
)
display(results_df)
eval_results_list_2 = list(eval_results_dict_2.items()) results_df = get_results_df( [v for _, v in eval_results_list_2], [k for k, _ in eval_results_list_2], ["correctness", "semantic_similarity"], ) display(results_df)
名称 | 正确性 | 语义相似度 | |
---|---|---|---|
0 | 摘要 | 4.315 | 0.976951 |
1 | 问答 | 4.355 | 0.978807 |
2 | 默认 | 4.305 | 0.978451 |
In [ ]
已复制!
# [optional] persist cache in folders so we can reuse
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
# [可选] 将缓存持久化到文件夹中以便重用 for k, pipeline in pipeline_dict.items(): pipeline.cache.persist(f"./cache/{k}.json")
3. 尝试多个提取器(带缓存)¶
待办事项
每个提取步骤都可能因 LLM 调用而昂贵。如果我们想尝试多个提取器怎么办?
我们利用**缓存**,这样所有之前的提取器调用都会被缓存,而我们只对最终的提取器调用进行实验。`IngestionPipeline` 为我们提供了干净的抽象,以便可以轻松尝试最终的提取器。
尝试不同的提取器