跳到内容

Lindorm

Bases: BasePydanticVectorStore

Lindorm 向量存储。

参数

名称

类型 描述 默认值 LindormVectorClient
client 用于数据插入/查询的向量索引客户端。

必需

pip install llama-index pip install opensearch-py pip install llama-index-vector-stores-lindorm

示例

源代码位于 llama-index-integrations/vector_stores/llama-index-vector-stores-lindorm/llama_index/vector_stores/lindorm/base.py

# Example: connect to a Lindorm search instance and build a LindormVectorStore.
from llama_index.vector_stores.lindorm import (
    LindormVectorStore,
    LindormVectorClient,
)

# lindorm instance info
# how to obtain a lindorm search instance:
# https://alibabacloud.com/help/en/lindorm/latest/create-an-instance

# how to access your lindorm search instance:
# https://www.alibabacloud.com/help/en/lindorm/latest/view-endpoints

# run curl commands to connect to and use LindormSearch:
# https://www.alibabacloud.com/help/en/lindorm/latest/connect-and-use-the-search-engine-with-the-curl-command
host = "ld-bp******jm*******-proxy-search-pub.lindorm.aliyuncs.com"
port = 30070
username = 'your_username'
password = 'your_password'

# index to demonstrate the VectorStore impl
index_name = "lindorm_test_index"

# extension param of lindorm search, number of cluster units to query; between 1 and method.parameters.nlist.
nprobe = "a number(string type)"

# extension param of lindorm search, usually used to improve recall accuracy, but it increases performance overhead;
#   between 1 and 200; default: 10.
reorder_factor = "a number(string type)"

# LindormVectorClient encapsulates logic for a single index with vector search enabled
client = LindormVectorClient(
    host=host,
    port=port,
    username=username,
    password=password,
    index=index_name,
    dimension=1536, # must match the dimensionality of your embedding model
    nprobe=nprobe,
    reorder_factor=reorder_factor,
    # filter_type="pre_filter/post_filter(default)"
)

# initialize vector store
vector_store = LindormVectorStore(client)
client property #
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
class LindormVectorStore(BasePydanticVectorStore):
    """
    Lindorm vector store.

    Args:
        client (LindormVectorClient): Vector index client to use
            for data insertion/querying.

    Examples:
        `pip install llama-index`
        `pip install opensearch-py`
        `pip install llama-index-vector-stores-lindorm`


        ```python
        from llama_index.vector_stores.lindorm import (
            LindormVectorStore,
            LindormVectorClient,
        )

        # lindorm instance info
        # how to obtain an lindorm search instance:
        # https://alibabacloud.com/help/en/lindorm/latest/create-an-instance

        # how to access your lindorm search instance:
        # https://www.alibabacloud.com/help/en/lindorm/latest/view-endpoints

        # run curl commands to connect to and use LindormSearch:
        # https://www.alibabacloud.com/help/en/lindorm/latest/connect-and-use-the-search-engine-with-the-curl-command
        host = "ld-bp******jm*******-proxy-search-pub.lindorm.aliyuncs.com"
        port = 30070
        username = 'your_username'
        password = 'your_password'

        # index to demonstrate the VectorStore impl
        index_name = "lindorm_test_index"

        # extension param of lindorm search, number of cluster units to query; between 1 and method.parameters.nlist.
        nprobe = "a number(string type)"

        # extension param of lindorm search, usually used to improve recall accuracy, but it increases performance overhead;
        #   between 1 and 200; default: 10.
        reorder_factor = "a number(string type)"

        # LindormVectorClient encapsulates logic for a single index with vector search enabled
        client = LindormVectorClient(
            host=host,
            port=port,
            username=username,
            password=password,
            index=index_name,
            dimension=1536, # match with your embedding model
            nprobe=nprobe,
            reorder_factor=reorder_factor,
            # filter_type="pre_filter/post_filter(default)"
        )

        # initialize vector store
        vector_store = LindormVectorStore(client)
        ```

    """

    # Node text is stored in the index alongside the embedding.
    stores_text: bool = True
    # Declared as a pydantic private attr so the client object is neither
    # validated nor serialized; set in __init__.
    _client: LindormVectorClient = PrivateAttr(default=None)

    def __init__(
        self,
        client: LindormVectorClient,
    ) -> None:
        """Initialize params."""
        super().__init__()
        self._client = client

    def _run_async(self, coro: Any) -> Any:
        """
        Drive a coroutine to completion and return its result.

        NOTE: asyncio.get_event_loop() is deprecated when no loop is set
        and raises RuntimeError in non-main threads (Python 3.10+), so
        fall back to creating and installing a fresh event loop.
        """
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        return loop.run_until_complete(coro)

    @property
    def client(self) -> Any:
        """Get client."""
        return self._client

    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """
        Add nodes to index.
        Synchronous wrapper, running the asynchronous logic of async_add
        to completion on an event loop.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings.

        Returns:
            List[str]: List of node_ids

        """
        return self._run_async(self.async_add(nodes, **add_kwargs))

    async def async_add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """
        Async add nodes to index.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings.

        Returns:
            List[str]: List of node_ids

        """
        await self._client.index_results(nodes)
        return [result.node_id for result in nodes]

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using a ref_doc_id.
        Synchronous wrapper, running the asynchronous logic of adelete
        to completion on an event loop.

        Args:
            ref_doc_id (str): The doc_id of the document whose nodes should be deleted.

        """
        self._run_async(self.adelete(ref_doc_id, **delete_kwargs))

    async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Async delete nodes using a ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document whose nodes should be deleted.

        """
        await self._client.delete_by_doc_id(ref_doc_id)

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """
        Query index for top k most similar nodes.
        Synchronous wrapper, running the asynchronous logic of aquery
        to completion on an event loop.

        Args:
            query (VectorStoreQuery): Store query object.

        """
        return self._run_async(self.aquery(query, **kwargs))

    async def aquery(
        self, query: VectorStoreQuery, **kwargs: Any
    ) -> VectorStoreQueryResult:
        """
        Async query index for top k most similar nodes.

        Args:
            query (VectorStoreQuery): Store query object.

        """
        query_embedding = cast(List[float], query.query_embedding)
        return await self._client.aquery(
            query.mode,
            query.query_str,
            query_embedding,
            query.similarity_top_k,
            filters=query.filters,
        )

获取客户端。

client: Any

add #

将节点添加到索引。同步包装器,以同步方式使用 async_add 函数的异步逻辑。

add(nodes: List[BaseNode], **add_kwargs: Any) -> List[str]

节点

名称

类型 描述 默认值 LindormVectorClient
List[BaseNode] List[BaseNode]:带有嵌入的节点列表。

返回值

pip install llama-index pip install opensearch-py pip install llama-index-vector-stores-lindorm

List[str]

描述 默认值
List[str]:node_id 列表

List[str]: 节点ID列表

client property #
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
def add(
    self,
    nodes: List[BaseNode],
    **add_kwargs: Any,
) -> List[str]:
    """
    Add nodes to index.

    Synchronous wrapper: drives the asynchronous logic of ``async_add``
    to completion on the current event loop.

    Args:
        nodes: List[BaseNode]: list of nodes with embeddings.

    Returns:
        List[str]: List of node_ids

    """
    return asyncio.get_event_loop().run_until_complete(
        self.async_add(nodes, **add_kwargs)
    )

async_add async #

async_add(nodes: List[BaseNode], **add_kwargs: Any) -> List[str]

异步向索引添加节点。

名称

类型 描述 默认值 LindormVectorClient
List[BaseNode] List[BaseNode]:带有嵌入的节点列表。

返回值

pip install llama-index pip install opensearch-py pip install llama-index-vector-stores-lindorm

List[str]

描述 默认值
List[str]:node_id 列表

List[str]: 节点ID列表

client property #
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
async def async_add(
    self,
    nodes: List[BaseNode],
    **add_kwargs: Any,
) -> List[str]:
    """
    Asynchronously index a batch of embedded nodes.

    Args:
        nodes: List[BaseNode]: list of nodes with embeddings.

    Returns:
        List[str]: ids of the nodes that were indexed.

    """
    await self._client.index_results(nodes)
    node_ids: List[str] = []
    for node in nodes:
        node_ids.append(node.node_id)
    return node_ids

delete #

delete(ref_doc_id: str, **delete_kwargs: Any) -> None

使用 ref_doc_id 删除节点。同步包装器,以同步方式使用 adelete 函数的异步逻辑。

名称

类型 描述 默认值 LindormVectorClient
ref_doc_id str

应删除其节点的文档的 doc_id。

pip install llama-index pip install opensearch-py pip install llama-index-vector-stores-lindorm
client property #
880
881
882
883
884
885
886
887
888
889
890
891
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Delete nodes using a ref_doc_id.

    Synchronous wrapper: drives the asynchronous logic of ``adelete``
    to completion on the current event loop.

    Args:
        ref_doc_id (str): The doc_id of the document whose nodes should be deleted.

    """
    asyncio.get_event_loop().run_until_complete(
        self.adelete(ref_doc_id, **delete_kwargs)
    )

adelete async #

adelete(ref_doc_id: str, **delete_kwargs: Any) -> None

使用 ref_doc_id 异步删除节点。

名称

类型 描述 默认值 LindormVectorClient
ref_doc_id str

应删除其节点的文档的 doc_id。

pip install llama-index pip install opensearch-py pip install llama-index-vector-stores-lindorm
client property #
893
894
895
896
897
898
899
900
901
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Asynchronously delete all nodes belonging to one source document.

    Args:
        ref_doc_id (str): The doc_id of the document whose nodes should be deleted.

    """
    client = self._client
    await client.delete_by_doc_id(ref_doc_id)

query #

query(query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult

查询索引以获取 top k 最相似的节点。同步包装器,以同步方式使用 aquery 函数的异步逻辑。

名称

类型 描述 默认值 LindormVectorClient
query VectorStoreQuery

存储查询对象。

pip install llama-index pip install opensearch-py pip install llama-index-vector-stores-lindorm
client property #
903
904
905
906
907
908
909
910
911
912
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
    """
    Query index for top k most similar nodes.

    Synchronous wrapper: drives the asynchronous logic of ``aquery``
    to completion on the current event loop.

    Args:
        query (VectorStoreQuery): Store query object.

    Returns:
        VectorStoreQueryResult: result produced by ``aquery``.

    """
    return asyncio.get_event_loop().run_until_complete(self.aquery(query, **kwargs))

aquery async #

aquery(query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult

异步查询索引以获取 top k 最相似的节点。

名称

类型 描述 默认值 LindormVectorClient
query VectorStoreQuery

存储查询对象。

pip install llama-index pip install opensearch-py pip install llama-index-vector-stores-lindorm
client property #
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
async def aquery(
    self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
    """
    Asynchronously fetch the top-k most similar nodes for a query.

    Args:
        query (VectorStoreQuery): Store query object.

    """
    embedding = cast(List[float], query.query_embedding)
    top_k = query.similarity_top_k
    return await self._client.aquery(
        query.mode, query.query_str, embedding, top_k, filters=query.filters
    )