In [ ]:
Copied!
from llama_index import VectorStoreIndex, SimpleDirectoryReader

# Read the example corpus from disk, then index it into a vector store.
documents = SimpleDirectoryReader("../../examples/data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents)
from llama_index import VectorStoreIndex, SimpleDirectoryReader

# Build a vector index over the Paul Graham example corpus.
reader = SimpleDirectoryReader("../../examples/data/paul_graham")
documents = reader.load_data()
index = VectorStoreIndex.from_documents(documents)
自定义向量存储
您可以通过以下方式使用自定义向量存储(本例中为 PineconeVectorStore):
In [ ]:
Copied!
import pinecone
from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores import PineconeVectorStore

# Connect to Pinecone and provision an index for the embeddings.
pinecone.init(api_key="<api_key>", environment="<environment>")
pinecone.create_index(
    "quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)

# Point LlamaIndex's storage layer at the Pinecone-backed vector store.
pinecone_index = pinecone.Index("quickstart")
storage_context = StorageContext.from_defaults(
    vector_store=PineconeVectorStore(pinecone_index)
)

# Ingest the documents; embeddings land in Pinecone via the storage context.
documents = SimpleDirectoryReader("../../examples/data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
import pinecone
from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores import PineconeVectorStore

# init pinecone -- fill in your own credentials. The placeholders match the
# snippet above; the empty strings here were a transcription error.
pinecone.init(api_key="<api_key>", environment="<environment>")
pinecone.create_index(
    "quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)

# construct vector store and customize storage context
storage_context = StorageContext.from_defaults(
    vector_store=PineconeVectorStore(pinecone.Index("quickstart"))
)

# Load documents and build index
documents = SimpleDirectoryReader(
    "../../examples/data/paul_graham"
).load_data()
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
如需查看更多初始化不同向量存储的示例,请参阅向量存储集成。
连接外部向量数据库(使用现有嵌入向量)
若您已计算好嵌入向量并将其存入外部向量数据库(如 Pinecone、Chroma),可通过以下方式与 LlamaIndex 配合使用:
In [ ]:
Copied!
# Wrap an existing Pinecone index that already holds embeddings and build a
# LlamaIndex index directly from it (no documents are loaded or re-embedded
# here -- the index reads from the external store).
vector_store = PineconeVectorStore(pinecone.Index("quickstart"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
vector_store = PineconeVectorStore(pinecone.Index("quickstart"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
In [ ]:
Copied!
# Turn the index into a query engine and run a natural-language query.
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
配置标准查询设置
要配置查询设置,您可以在构建查询引擎时直接将其作为关键字参数传递:
In [ ]:
Copied!
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

# Standard query settings are passed directly as keyword arguments when
# building the query engine.
query_engine = index.as_query_engine(
    similarity_top_k=3,  # retrieve the 3 most similar nodes
    vector_store_query_mode="default",
    # Metadata filter: only nodes whose metadata has name == "paul graham".
    filters=MetadataFilters(
        filters=[
            ExactMatchFilter(key="name", value="paul graham"),
        ]
    ),
    alpha=None,  # left unset; presumably a store-specific weighting -- see docs
    doc_ids=None,  # no restriction to particular document ids
)
response = query_engine.query("what did the author do growing up?")
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

# Same as above: query settings go straight into as_query_engine().
query_engine = index.as_query_engine(
    similarity_top_k=3,  # retrieve the 3 most similar nodes
    vector_store_query_mode="default",
    # Metadata filter: only nodes whose metadata has name == "paul graham".
    filters=MetadataFilters(
        filters=[
            ExactMatchFilter(key="name", value="paul graham"),
        ]
    ),
    alpha=None,  # left unset; presumably a store-specific weighting -- see docs
    doc_ids=None,  # no restriction to particular document ids
)
response = query_engine.query("what did the author do growing up?")
请注意,元数据筛选是针对 Node.metadata 中指定的元数据进行的。
或者,如果您正在使用较低级别的组合式 API:
In [ ]:
Copied!
from llama_index import get_response_synthesizer
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from llama_index.query_engine.retriever_query_engine import (
    RetrieverQueryEngine,
)
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

# build retriever
# NOTE: `filters` must be a MetadataFilters object (as in the
# as_query_engine example above), not a bare list of filters. The imports
# for ExactMatchFilter/MetadataFilters are added so this snippet is
# self-contained.
retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=3,
    vector_store_query_mode="default",
    filters=MetadataFilters(
        filters=[ExactMatchFilter(key="name", value="paul graham")]
    ),
    alpha=None,
    doc_ids=None,
)

# build query engine: pair the retriever with a response synthesizer
query_engine = RetrieverQueryEngine(
    retriever=retriever, response_synthesizer=get_response_synthesizer()
)

# query
response = query_engine.query("what did the author do growing up?")
from llama_index import get_response_synthesizer
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from llama_index.query_engine.retriever_query_engine import (
    RetrieverQueryEngine,
)
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

# build retriever
# NOTE: `filters` must be a MetadataFilters object (as in the
# as_query_engine example above), not a bare list of filters. The imports
# for ExactMatchFilter/MetadataFilters are added so this snippet is
# self-contained.
retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=3,
    vector_store_query_mode="default",
    filters=MetadataFilters(
        filters=[ExactMatchFilter(key="name", value="paul graham")]
    ),
    alpha=None,
    doc_ids=None,
)

# build query engine: pair the retriever with a response synthesizer
query_engine = RetrieverQueryEngine(
    retriever=retriever, response_synthesizer=get_response_synthesizer()
)

# query
response = query_engine.query("what did the author do growing up?")
配置向量存储的特定关键字参数
您还可以通过传入 vector_store_kwargs 来自定义特定向量存储实现独有的关键字参数。
In [ ]:
Copied!
# vector_store_kwargs forwards implementation-specific keyword arguments to
# the underlying vector store.
query_engine = index.as_query_engine(
    similarity_top_k=3,
    # only works for pinecone
    vector_store_kwargs={
        "filter": {"name": "paul graham"},
    },
)
response = query_engine.query("what did the author do growing up?")
# vector_store_kwargs forwards implementation-specific keyword arguments to
# the underlying vector store.
query_engine = index.as_query_engine(
    similarity_top_k=3,
    # only works for pinecone
    vector_store_kwargs={
        "filter": {"name": "paul graham"},
    },
)
response = query_engine.query("what did the author do growing up?")
使用自动检索器
你也可以利用大语言模型(LLM)自动为你决定查询设置! 目前我们支持自动配置精确匹配的元数据过滤器和 top k 参数。
In [ ]:
Copied!
from llama_index import get_response_synthesizer
from llama_index.indices.vector_store.retrievers import (
    VectorIndexAutoRetriever,
)
from llama_index.query_engine.retriever_query_engine import (
    RetrieverQueryEngine,
)
from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo

# Describe the collection's content and metadata schema so the LLM can
# automatically derive query settings (exact-match metadata filters and
# top-k) from the natural-language question.
vector_store_info = VectorStoreInfo(
    content_info="brief biography of celebrities",
    metadata_info=[
        MetadataInfo(
            name="category",
            type="str",
            description="Category of the celebrity, one of [Sports, Entertainment, Business, Music]",
        ),
        MetadataInfo(
            name="country",
            type="str",
            description="Country of the celebrity, one of [United States, Barbados, Portugal]",
        ),
    ],
)

# build retriever
retriever = VectorIndexAutoRetriever(
    index, vector_store_info=vector_store_info
)

# build query engine
query_engine = RetrieverQueryEngine(
    retriever=retriever, response_synthesizer=get_response_synthesizer()
)

# query
response = query_engine.query(
    "Tell me about two celebrities from United States"
)
from llama_index import get_response_synthesizer
from llama_index.indices.vector_store.retrievers import (
    VectorIndexAutoRetriever,
)
from llama_index.query_engine.retriever_query_engine import (
    RetrieverQueryEngine,
)
from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo

# Describe the collection's content and metadata schema so the LLM can
# automatically derive query settings (exact-match metadata filters and
# top-k) from the natural-language question.
vector_store_info = VectorStoreInfo(
    content_info="brief biography of celebrities",
    metadata_info=[
        MetadataInfo(
            name="category",
            type="str",
            description="Category of the celebrity, one of [Sports, Entertainment, Business, Music]",
        ),
        MetadataInfo(
            name="country",
            type="str",
            description="Country of the celebrity, one of [United States, Barbados, Portugal]",
        ),
    ],
)

# build retriever
retriever = VectorIndexAutoRetriever(
    index, vector_store_info=vector_store_info
)

# build query engine
query_engine = RetrieverQueryEngine(
    retriever=retriever, response_synthesizer=get_response_synthesizer()
)

# query
response = query_engine.query(
    "Tell me about two celebrities from United States"
)