In this tutorial, we show you how to build a standard retriever against a vector database that fetches nodes via top-k similarity.
We use Pinecone as the vector database. We load in the nodes using our high-level ingestion abstractions (to see how to build this from scratch, see our previous tutorial!).
We will show how to do the following:
- How to generate a query embedding
- How to query the vector database using different search modes (dense, sparse, hybrid)
- How to parse results into a set of nodes
- How to put this into a custom retriever
Setup

We build an empty Pinecone index and define the necessary LlamaIndex wrappers/abstractions so that we can start loading data into Pinecone.
If you're opening this notebook on Colab, you will probably need to install LlamaIndex 🦙.
%pip install llama-index-readers-file pymupdf
%pip install llama-index-vector-stores-pinecone
%pip install llama-index-embeddings-openai
!pip install llama-index
from pinecone import Pinecone, Index, ServerlessSpec
import os
api_key = os.environ["PINECONE_API_KEY"]
pc = Pinecone(api_key=api_key)
# dimensions are for text-embedding-ada-002
index_name = "quickstart"
if index_name not in pc.list_indexes().names():
    pc.create_index(
        index_name,
        dimension=1536,
        metric="euclidean",
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )
pinecone_index = pc.Index(index_name)
# [Optional] drop contents in index
pinecone_index.delete(delete_all=True)
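To sanity-check the setup, you can inspect the index before loading anything; describe_index_stats is part of the Pinecone client, and on a fresh (or just-cleared) index the vector count should be zero. A minimal sketch:

# verify the index is empty before ingestion
stats = pinecone_index.describe_index_stats()
print(stats.total_vector_count)  # expect 0 on a fresh index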
This is the simple wrapper abstraction to use in LlamaIndex. We wrap it in a StorageContext so that we can easily load in nodes.
from llama_index.vector_stores.pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
!mkdir data
!wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
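As a quick check: load typically returns one LlamaIndex Document per PDF page, so you can peek at the page count and the start of the first page. A minimal sketch:

# inspect what the reader produced
print(len(documents))  # number of page-level documents loaded
print(documents[0].get_content()[:200])  # first 200 chars of page 1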
Load the documents into the PineconeVectorStore.
NOTE: We use high-level ingestion abstractions here, via VectorStoreIndex.from_documents. We'll avoid using VectorStoreIndex in the rest of this tutorial.
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import StorageContext
splitter = SentenceSplitter(chunk_size=1024)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, transformations=[splitter], storage_context=storage_context
)
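Under the hood, from_documents runs the transformations over the documents, embeds each resulting chunk, and writes the nodes into the vector store. If you wanted to skip VectorStoreIndex for ingestion too, a lower-level equivalent might look like the following (a minimal sketch, assuming OpenAI embeddings as the embedding model):

from llama_index.core.schema import MetadataMode
from llama_index.embeddings.openai import OpenAIEmbedding

# split documents into nodes, embed each node, then add them directly
nodes = splitter.get_nodes_from_documents(documents)
embed_model = OpenAIEmbedding()
for node in nodes:
    node.embedding = embed_model.get_text_embedding(
        node.get_content(metadata_mode=MetadataMode.EMBED)
    )
vector_store.add(nodes)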
Now we're ready to define our retriever against this vector store to retrieve a set of nodes.
We'll show the process step by step, and then wrap it into a function.
query_str = "Can you tell me about the key concepts for safety finetuning"
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
query_embedding = embed_model.get_query_embedding(query_str)
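The query embedding must match the dimensionality that the index was created with (1536 for text-embedding-ada-002), so a quick assertion catches mismatches early. A minimal sketch:

# the embedding dimension must match the Pinecone index dimension
assert len(query_embedding) == 1536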
We show how to query the vector database with different modes: default, sparse, and hybrid.
We first build a VectorStoreQuery and then query the vector database.
# construct vector store query
from llama_index.core.vector_stores import VectorStoreQuery
query_mode = "default"
# query_mode = "sparse"
# query_mode = "hybrid"
vector_store_query = VectorStoreQuery(
    query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
# returns a VectorStoreQueryResult
query_result = vector_store.query(vector_store_query)
query_result
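Note that the sparse and hybrid modes require the index to store sparse vectors alongside the dense ones; in LlamaIndex this is enabled when constructing the vector store, and hybrid queries take an alpha parameter to weight dense vs. sparse scores. A minimal sketch (this also assumes the Pinecone index was created with the dotproduct metric, which Pinecone requires for hybrid search):

# hybrid search needs sparse vectors stored alongside dense ones
hybrid_vector_store = PineconeVectorStore(
    pinecone_index=pinecone_index, add_sparse_vector=True
)
hybrid_query = VectorStoreQuery(
    query_embedding=query_embedding,
    query_str=query_str,  # used to build the sparse part of the query
    similarity_top_k=2,
    mode="hybrid",
    alpha=0.5,  # 1.0 = pure dense, 0.0 = pure sparse
)
hybrid_result = hybrid_vector_store.query(hybrid_query)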
The VectorStoreQueryResult returns the set of nodes and their similarities. We construct NodeWithScore objects with these.
from llama_index.core.schema import NodeWithScore
from typing import Optional
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
    score: Optional[float] = None
    if query_result.similarities is not None:
        score = query_result.similarities[index]
    nodes_with_scores.append(NodeWithScore(node=node, score=score))
from llama_index.core.response.notebook_utils import display_source_node
for node in nodes_with_scores:
    display_source_node(node, source_length=1000)
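display_source_node renders a truncated view in the notebook; if you want the raw values instead, each NodeWithScore exposes the score and the underlying node directly. A minimal sketch:

# raw access to the retrieved scores and text
for n in nodes_with_scores:
    print(n.score, n.node.get_content()[:200])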
Let's put this into a retriever subclass that can plug into the rest of LlamaIndex workflows!
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List


class PineconeRetriever(BaseRetriever):
    """Retriever over a pinecone vector store."""

    def __init__(
        self,
        vector_store: PineconeVectorStore,
        embed_model: Any,
        query_mode: str = "default",
        similarity_top_k: int = 2,
    ) -> None:
        """Init params."""
        self._vector_store = vector_store
        self._embed_model = embed_model
        self._query_mode = query_mode
        self._similarity_top_k = similarity_top_k
        super().__init__()

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Retrieve."""
        if query_bundle.embedding is None:
            query_embedding = self._embed_model.get_query_embedding(
                query_bundle.query_str
            )
        else:
            query_embedding = query_bundle.embedding
        vector_store_query = VectorStoreQuery(
            query_embedding=query_embedding,
            similarity_top_k=self._similarity_top_k,
            mode=self._query_mode,
        )
        query_result = self._vector_store.query(vector_store_query)

        nodes_with_scores = []
        for index, node in enumerate(query_result.nodes):
            score: Optional[float] = None
            if query_result.similarities is not None:
                score = query_result.similarities[index]
            nodes_with_scores.append(NodeWithScore(node=node, score=score))
        return nodes_with_scores
retriever = PineconeRetriever(
    vector_store, embed_model, query_mode="default", similarity_top_k=2
)
retrieved_nodes = retriever.retrieve(query_str)
for node in retrieved_nodes:
    display_source_node(node, source_length=1000)
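Because PineconeRetriever subclasses BaseRetriever, it inherits the standard retriever surface, including the async entry point (by default, aretrieve falls back to the sync _retrieve unless you also override _aretrieve). A minimal sketch:

# in a notebook, top-level await works directly
retrieved_nodes = await retriever.aretrieve(query_str)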
We can plug our retriever into a RetrieverQueryEngine to synthesize a response.
NOTE: We'll cover more on how to build response synthesis from scratch in future tutorials!
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(query_str)
print(str(response))
The key concepts for safety fine-tuning include supervised safety fine-tuning, safety RLHF (Reinforcement Learning from Human Feedback), and safety context distillation. Supervised safety fine-tuning involves gathering adversarial prompts and safe demonstrations to train the model to align with safety guidelines. Safety RLHF integrates safety into the RLHF pipeline by training a safety-specific reward model and gathering challenging adversarial prompts for fine-tuning. Safety context distillation refines the RLHF pipeline by generating safer model responses using a safety preprompt and fine-tuning the model on these responses without the preprompt. These concepts are used to mitigate safety risks and improve the safety of the model's responses.
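from_args also accepts synthesis options if you want to control how the retrieved nodes are turned into an answer, for example a response_mode argument. A minimal sketch (assumes the default OpenAI LLM is configured):

# e.g. "tree_summarize" builds the answer hierarchically over the chunks
query_engine = RetrieverQueryEngine.from_args(
    retriever, response_mode="tree_summarize"
)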