Pinecone Vector Store¶
If you're opening this Notebook on Colab, you will probably need to install LlamaIndex 🦙.
In [ ]
%pip install llama-index llama-index-vector-stores-pinecone
In [ ]
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
Creating a Pinecone Index¶
In [ ]
from pinecone import Pinecone, ServerlessSpec
In [ ]
os.environ["PINECONE_API_KEY"] = "..."
os.environ["OPENAI_API_KEY"] = "sk-proj-..."
api_key = os.environ["PINECONE_API_KEY"]
pc = Pinecone(api_key=api_key)
os.environ["PINECONE_API_KEY"] = "..." os.environ["OPENAI_API_KEY"] = "sk-proj-..." api_key = os.environ["PINECONE_API_KEY"] pc = Pinecone(api_key=api_key)
In [ ]
# delete if needed
# pc.delete_index("quickstart")
In [ ]
# dimensions are for text-embedding-ada-002
pc.create_index(
name="quickstart",
dimension=1536,
metric="euclidean",
spec=ServerlessSpec(cloud="aws", region="us-east-1"),
)
# If you need to create a PodBased Pinecone index, you could alternatively do this:
#
# from pinecone import Pinecone, PodSpec
#
# pc = Pinecone(api_key='xxx')
#
# pc.create_index(
# name='my-index',
# dimension=1536,
# metric='cosine',
# spec=PodSpec(
# environment='us-east1-gcp',
# pod_type='p1.x1',
# pods=1
# )
# )
#
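Note that create_index raises an error if an index with that name already exists, so re-running the cell above will fail. If you expect to re-run the notebook, a small guard like this sketch can help (assuming the pinecone-client v3+ API, where list_indexes().names() returns the existing index names):

# Only create the index if it doesn't exist yet (assumes pinecone-client v3+)
if "quickstart" not in pc.list_indexes().names():
    pc.create_index(
        name="quickstart",
        dimension=1536,
        metric="euclidean",
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )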
In [ ]
pinecone_index = pc.Index("quickstart")
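As a quick sanity check before loading data, you can inspect the index with the Pinecone client's describe_index_stats; on a fresh index the vector count should be 0:

# Expect dimension=1536 and total_vector_count=0 on a fresh index
print(pinecone_index.describe_index_stats())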
Load documents, build the PineconeVectorStore and VectorStoreIndex¶
In [ ]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.pinecone import PineconeVectorStore
from IPython.display import Markdown, display
Download Data
In [ ]
!mkdir -p 'data/paul_graham/'
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'
In [ ]
# load documents
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
In [ ]
# initialize without metadata filter
from llama_index.core import StorageContext
if "OPENAI_API_KEY" not in os.environ:
    raise EnvironmentError("Environment variable OPENAI_API_KEY is not set")
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
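If you need to add more documents later, you don't have to rebuild the index from scratch; VectorStoreIndex supports incremental inserts. A minimal sketch (the Document text here is just a placeholder):

from llama_index.core import Document

# Hypothetical extra document; it is embedded and upserted into Pinecone
index.insert(Document(text="Some additional text to index."))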
Query Index¶
It may take a minute or so for the index to be ready!
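Rather than waiting a fixed amount of time, you can poll the index status until it reports ready. A minimal sketch, assuming the pinecone-client v3+ response shape where describe_index exposes status["ready"]:

import time

# Block until the serverless index reports ready
while not pc.describe_index("quickstart").status["ready"]:
    time.sleep(1)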
In [ ]
# set Logging to DEBUG for more detailed outputs
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
INFO:httpx:HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 200 OK"
HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 200 OK"
INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
In [ ]
display(Markdown(f"<b>{response}</b>"))
display(Markdown(f"{response}"))
Growing up, the author worked on writing and programming. They wrote short stories and tried writing programs on the IBM 1401 computer. Later, they got a microcomputer and began programming more extensively, writing simple games and a word processor.
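Beyond the synthesized answer, the response also carries the retrieved chunks: each entry in response.source_nodes is a NodeWithScore, so you can inspect what the answer was grounded on:

# Inspect the retrieved chunks behind the answer
for source in response.source_nodes:
    print(source.score, source.node.get_content()[:100])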
Filtering¶
You can also fetch a list of nodes directly with filters.
In [ ]
from llama_index.core.vector_stores.types import (
MetadataFilter,
MetadataFilters,
FilterOperator,
FilterCondition,
)
filter = MetadataFilters(
filters=[
MetadataFilter(
key="file_path",
value="/Users/loganmarkewich/giant_change/llama_index/docs/docs/examples/vector_stores/data/paul_graham/paul_graham_essay.txt",
operator=FilterOperator.EQ,
)
],
condition=FilterCondition.AND,
)
You can fetch nodes directly with filters. The code below will return all nodes that match the filter.
In [ ]
nodes = vector_store.get_nodes(filters=filter, limit=100)
print(len(nodes))
22
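The returned objects are regular LlamaIndex nodes, so you can confirm the filter matched by checking their metadata:

# Each node carries its metadata, including the file_path we filtered on
print(nodes[0].metadata["file_path"])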
You can also fetch with top-k and filters.
In [ ]
query_engine = index.as_query_engine(similarity_top_k=2, filters=filter)
response = query_engine.query("What did the author do growing up?")
print(len(response.source_nodes))
INFO:httpx:HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 200 OK"
HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 200 OK"
INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
2
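When you're done experimenting, you can delete the serverless index to free its resources (this mirrors the commented-out cleanup call near the top of the notebook):

# Tear down the quickstart index when finished
pc.delete_index("quickstart")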