In [ ]
%pip install llama-index-llms-openai pandas[jinja2] spacy
In [ ]
# attach to the same event-loop
import nest_asyncio
nest_asyncio.apply()
In [ ]
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
In [ ]
from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    Response,
)
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import FaithfulnessEvaluator
from llama_index.core.node_parser import SentenceSplitter
import pandas as pd
pd.set_option("display.max_colwidth", 0)
Here we use GPT-4 for the evaluation.
In [ ]
# gpt-4
gpt4 = OpenAI(temperature=0, model="gpt-4")
evaluator_gpt4 = FaithfulnessEvaluator(llm=gpt4)
In [ ]
documents = SimpleDirectoryReader("./test_wiki_data/").load_data()
In [ ]
# create vector index
splitter = SentenceSplitter(chunk_size=512)
vector_index = VectorStoreIndex.from_documents(
    documents, transformations=[splitter]
)
In [ ]
from llama_index.core.evaluation import EvaluationResult


# define jupyter display function
def display_eval_df(response: Response, eval_result: EvaluationResult) -> None:
    if response.source_nodes == []:
        print("no response!")
        return
    eval_df = pd.DataFrame(
        {
            "Response": str(response),
            "Source": response.source_nodes[0].node.text[:1000] + "...",
            "Evaluation Result": "Pass" if eval_result.passing else "Fail",
            "Reasoning": eval_result.feedback,
        },
        index=[0],
    )
    eval_df = eval_df.style.set_properties(
        **{
            "inline-size": "600px",
            "overflow-wrap": "break-word",
        },
        subset=["Response", "Source"]
    )
    display(eval_df)
To run an evaluation, call the .evaluate_response() function on the Response object returned from a query. Let's evaluate the outputs of the vector_index.
In [ ]
query_engine = vector_index.as_query_engine()
response_vector = query_engine.query("How did New York City get its name?")
eval_result = evaluator_gpt4.evaluate_response(response=response_vector)
In [ ]
display_eval_df(response_vector, eval_result)
| | Response | Source | Evaluation Result | Reasoning |
|---|---|---|---|---|
| 0 | New York City got its name in 1664, when the city came under English control. It was renamed New York after King Charles II of England granted the lands to his brother, the Duke of York. | The city came under English control in 1664 and was renamed New York after King Charles II of England granted the lands to his brother, the Duke of York. The city was regained by the Dutch in July 1673 and was renamed New Orange for one year and three months; the city has carried the name New York since November 1674. New York City was the capital of the United States from 1785 until 1790, and has been the largest U.S. city since 1790. In the late 19th and early 20th centuries, millions of immigrants arriving by ship were greeted by the Statue of Liberty, a symbol of the U.S. and its ideals of liberty and peace. In the 21st century, New York City has emerged as a global node of creativity and entrepreneurship and a symbol of freedom and cultural diversity. The New York Times has won the most Pulitzer Prizes for journalism and remains the U.S. media's "newspaper of record". In 2019, New York City was voted the greatest city in the world in a survey of over 30,000 participants... | Pass | YES |
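If you already have the response text and its retrieved contexts as plain strings (for example, outside a query engine), the same check can be run through the evaluator's lower-level evaluate() call. The sketch below assumes the generic evaluate(query=..., response=..., contexts=...) interface of the base evaluator class; treat the exact keyword arguments as an assumption to verify against your installed version.

In [ ]

# A minimal sketch, assuming FaithfulnessEvaluator inherits the generic
# evaluate(query=..., response=..., contexts=...) method from the base
# evaluator interface; verify the signature for your llama-index version.
contexts = [sn.node.get_content() for sn in response_vector.source_nodes]
manual_result = evaluator_gpt4.evaluate(
    query="How did New York City get its name?",
    response=str(response_vector),
    contexts=contexts,
)
print(manual_result.passing, manual_result.feedback)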
Benchmark on Generated Questions
Now let's generate a few more questions so that we have more to evaluate with, and run a small benchmark.
In [ ]
from llama_index.core.evaluation import DatasetGenerator
question_generator = DatasetGenerator.from_documents(documents)
eval_questions = question_generator.generate_questions_from_nodes(5)
eval_questions
/Users/loganmarkewich/giant_change/llama_index/llama-index-core/llama_index/core/evaluation/dataset_generation.py:212: DeprecationWarning: Call to deprecated class DatasetGenerator. (Deprecated in favor of `RagDatasetGenerator` which should be used instead.)
  return cls(
/Users/loganmarkewich/giant_change/llama_index/llama-index-core/llama_index/core/evaluation/dataset_generation.py:309: DeprecationWarning: Call to deprecated class QueryResponseDataset. (Deprecated in favor of `LabelledRagDataset` which should be used instead.)
  return QueryResponseDataset(queries=queries, responses=responses_dict)
Out[ ]
['What is the population of New York City as of 2020?', 'Which city is the second-largest in the United States?', 'How many people live within 250 miles of New York City?', 'What are the five boroughs of New York City?', 'What is the gross metropolitan product of the New York metropolitan area?']
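The deprecation warnings above point to RagDatasetGenerator as the replacement for DatasetGenerator. A rough equivalent using the newer class might look like the sketch below; the import path and the examples/query attribute names are assumptions based on the llama_index.core.llama_dataset module and may vary between versions.

In [ ]

# Sketch of the newer dataset generator named in the deprecation warning.
# RagDatasetGenerator and the LabelledRagDataset attributes used here are
# assumed; check them against your installed llama-index version.
from llama_index.core.llama_dataset.generator import RagDatasetGenerator

rag_generator = RagDatasetGenerator.from_documents(
    documents,
    llm=gpt4,
    num_questions_per_chunk=1,
)
rag_dataset = rag_generator.generate_questions_from_nodes()
eval_questions_alt = [example.query for example in rag_dataset.examples][:5]
eval_questions_alt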
In [ ]
import asyncio


def evaluate_query_engine(query_engine, questions):
    # fire off all queries concurrently; nest_asyncio (applied above) allows
    # asyncio.run() to be called from inside the notebook's running event loop
    c = [query_engine.aquery(q) for q in questions]
    results = asyncio.run(asyncio.gather(*c))
    print("finished query")

    total_correct = 0
    for r in results:
        # evaluate with gpt 4
        eval_result = (
            1 if evaluator_gpt4.evaluate_response(response=r).passing else 0
        )
        total_correct += eval_result

    return total_correct, len(results)
In [ ]
vector_query_engine = vector_index.as_query_engine()
correct, total = evaluate_query_engine(vector_query_engine, eval_questions[:5])
print(f"score: {correct}/{total}")
finished query
score: 5/5
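Beyond the aggregate score, it is often useful to see which generated questions pass or fail individually. The loop below is a small sketch built from the objects defined above; the column names are illustrative, not part of the library.

In [ ]

# Sketch: per-question pass/fail breakdown with the same GPT-4 evaluator.
rows = []
for q in eval_questions[:5]:
    r = vector_query_engine.query(q)
    res = evaluator_gpt4.evaluate_response(query=q, response=r)
    rows.append(
        {
            "Question": q,
            "Response": str(r),
            "Passing": res.passing,
            "Feedback": res.feedback,
        }
    )
pd.DataFrame(rows)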