%pip install llama-index-readers-file pymupdf
%pip install llama-index-program-openai
%pip install llama-index-llms-openai
from llama_index.core import PromptTemplate
# Candidate route descriptions shown to the router LLM; answers refer to
# these by their 1-based position in the rendered list.
choices = [
"Useful for questions related to apples",
"Useful for questions related to oranges",
]
def get_choice_str(choices):
    """Render choice descriptions as a numbered list.

    Each entry is prefixed with its 1-based index ("1. ...", "2. ...") and
    entries are separated by a blank line.
    """
    numbered = (f"{pos}. {text}" for pos, text in enumerate(choices, start=1))
    return "\n\n".join(numbered)
choices_str = get_choice_str(choices)
# Router prompt: asks the LLM to return the top relevant choice number(s)
# for a query. Template variables: {num_choices}, {context_list},
# {max_outputs}, {query_str}.
router_prompt0 = PromptTemplate(
"Some choices are given below. It is provided in a numbered list (1 to"
" {num_choices}), where each item in the list corresponds to a"
" summary.\n---------------------\n{context_list}\n---------------------\nUsing"
" only the choices above and not prior knowledge, return the top choices"
" (no more than {max_outputs}, but only select what is needed) that are"
" most relevant to the question: '{query_str}'\n"
)
让我们在一组测试问题上尝试这个提示,看看会得到什么输出结果。
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
def get_formatted_prompt(query_str):
    """Fill router_prompt0 with the module-level choices for *query_str*.

    Uses max_outputs=2 and the globals `choices` / `choices_str`.
    """
    return router_prompt0.format(
        num_choices=len(choices),
        max_outputs=2,
        context_list=choices_str,
        query_str=query_str,
    )
query_str = "Can you tell me more about the amount of Vitamin C in apples"
fmt_prompt = get_formatted_prompt(query_str)
response = llm.complete(fmt_prompt)
print(str(response))
1. Useful for questions related to apples
query_str = "What are the health benefits of eating orange peels?"
fmt_prompt = get_formatted_prompt(query_str)
response = llm.complete(fmt_prompt)
print(str(response))
2. Useful for questions related to oranges
query_str = (
"Can you tell me more about the amount of Vitamin C in apples and oranges."
)
fmt_prompt = get_formatted_prompt(query_str)
response = llm.complete(fmt_prompt)
print(str(response))
1. Useful for questions related to apples 2. Useful for questions related to oranges
观察:虽然响应对应了正确选项,但将其解析为结构化输出(例如单个整数)可能显得不够优雅。我们需要对选项字符串进行解析以提取出单个数字,并确保该过程能够应对各种失败情况。
2. 可生成结构化输出的路由提示¶
因此下一步是尝试提示模型输出更具结构化的表示形式(JSON)。
我们定义了一个输出解析类(RouterOutputParser)。该输出解析器将同时负责格式化提示词,并将结果解析为结构化对象(一个Answer类型)。
随后我们围绕大语言模型调用应用输出解析器的format和parse方法,通过路由提示来生成结构化输出。
2.a 导入 Answer 类¶
我们从代码库中加载 Answer 类。这是一个非常简单的数据类,包含两个字段:choice 和 reason。
from dataclasses import fields
from pydantic import BaseModel
import json
# Structured routing result. NOTE: deliberately no docstring — a docstring
# would surface as a "description" field in the JSON schema printed below.
class Answer(BaseModel):
    choice: int  # 1-based index into the choices list
    reason: str  # model's justification for selecting this choice
print(json.dumps(Answer.schema(), indent=2))
{
"title": "Answer",
"type": "object",
"properties": {
"choice": {
"title": "Choice",
"type": "integer"
},
"reason": {
"title": "Reason",
"type": "string"
}
},
"required": [
"choice",
"reason"
]
}
2.b 定义路由输出解析器¶
from llama_index.core.types import BaseOutputParser
# Format instructions appended to the router prompt: tells the LLM to emit
# a JSON array of {choice, reason} objects. This is runtime prompt text;
# its braces must be escaped before it is embedded in a PromptTemplate.
FORMAT_STR = """The output should be formatted as a JSON instance that conforms to
the JSON schema below.
Here is the output schema:
{
"type": "array",
"items": {
"type": "object",
"properties": {
"choice": {
"type": "integer"
},
"reason": {
"type": "string"
}
},
"required": [
"choice",
"reason"
],
"additionalProperties": false
}
}
"""
如果我们需要将 FORMAT_STR 作为提示模板的一部分放入 f-string 中,那么就需要对花括号进行转义,以避免它们被识别为模板变量。
def _escape_curly_braces(input_string: str) -> str:
# Replace '{' with '{{' and '}' with '}}' to escape curly braces
escaped_string = input_string.replace("{", "{{").replace("}", "}}")
return escaped_string
我们现在定义一个简单的解析函数,用于从LLM响应中提取JSON字符串(通过搜索方括号实现)
def _marshal_output_to_json(output: str) -> str:
output = output.strip()
left = output.find("[")
right = output.find("]")
output = output[left : right + 1]
return output
我们将这些内容整合到 RouterOutputParser 中
from typing import List
class RouterOutputParser(BaseOutputParser):
    """Output parser for the router prompt.

    ``format`` appends the (brace-escaped) JSON schema instructions to a
    prompt; ``parse`` extracts the JSON array from the completion and
    validates each element into an :class:`Answer`.
    """

    def parse(self, output: str) -> List[Answer]:
        """Parse the raw LLM completion into a list of Answer objects."""
        json_output = _marshal_output_to_json(output)
        json_dicts = json.loads(json_output)
        # BUGFIX: pydantic models have no `from_dict` classmethod (the
        # original raised AttributeError); construct via keyword expansion,
        # which validates fields on both pydantic v1 and v2.
        return [Answer(**json_dict) for json_dict in json_dicts]

    def format(self, prompt_template: str) -> str:
        """Append the brace-escaped format instructions to *prompt_template*."""
        return prompt_template + "\n\n" + _escape_curly_braces(FORMAT_STR)
2.c 动手实践¶
我们创建一个名为 route_query 的函数,该函数将接收输出解析器、大语言模型和提示模板作为输入,并输出结构化答案。
output_parser = RouterOutputParser()
from typing import List
def route_query(
    query_str: str, choices: List[str], output_parser: RouterOutputParser
):
    """Route *query_str* against *choices* and return parsed Answer objects.

    Formats the router prompt, appends the JSON format instructions via the
    output parser, calls the module-level ``llm``, and parses the completion.
    """
    # BUGFIX: the original had a bare `choices_str` expression (a no-op) and
    # silently relied on the module-level variable; derive the rendered
    # choice list from the `choices` argument instead.
    choices_str = get_choice_str(choices)
    fmt_base_prompt = router_prompt0.format(
        num_choices=len(choices),
        max_outputs=len(choices),
        context_list=choices_str,
        query_str=query_str,
    )
    fmt_json_prompt = output_parser.format(fmt_base_prompt)
    raw_output = llm.complete(fmt_json_prompt)
    return output_parser.parse(str(raw_output))
3. 通过函数调用端点执行路由¶
在上一节中,我们演示了如何通过文本补全端点构建路由器。这包括格式化提示词以引导模型输出结构化 JSON,以及通过解析函数加载 JSON 数据。
这个过程可能显得有些繁琐。函数调用端点(例如 OpenAI)通过允许模型原生输出结构化函数,从而消除了这种复杂性。这不再需要手动编写提示词并解析输出结果。
LlamaIndex 提供了一种名为 PydanticProgram 的抽象层,它能与函数端点集成并生成结构化的 Pydantic 对象。我们已实现与 OpenAI 和 Guidance 的集成。
我们通过注解重新定义了 Answer 类,并新增了一个包含答案列表的 Answers 类。
from pydantic import Field
# Re-declared for function calling. NOTE: the docstring below becomes the
# "description" field in the generated JSON schema, so it is kept verbatim.
class Answer(BaseModel):
    "Represents a single choice with a reason."
    choice: int  # 1-based index of the selected choice
    reason: str  # justification for the selection
class Answers(BaseModel):
    """Represents a list of answers."""

    # One entry per choice the function-calling LLM selects.
    answers: List[Answer]
Answers.schema()
{'title': 'Answers',
'description': 'Represents a list of answers.',
'type': 'object',
'properties': {'answers': {'title': 'Answers',
'type': 'array',
'items': {'$ref': '#/definitions/Answer'}}},
'required': ['answers'],
'definitions': {'Answer': {'title': 'Answer',
'description': 'Represents a single choice with a reason.',
'type': 'object',
'properties': {'choice': {'title': 'Choice', 'type': 'integer'},
'reason': {'title': 'Reason', 'type': 'string'}},
'required': ['choice', 'reason']}}}
from llama_index.program.openai import OpenAIPydanticProgram
# Pre-fill the static template variables; only {context_list} and
# {query_str} remain to be supplied per call.
router_prompt1 = router_prompt0.partial_format(
num_choices=len(choices),
max_outputs=len(choices),
)
# Function-calling program: the LLM fills the Answers schema directly, so
# no manual JSON parsing is needed.
program = OpenAIPydanticProgram.from_defaults(
output_cls=Answers,
prompt=router_prompt1,
verbose=True,
)
query_str = "What are the health benefits of eating orange peels?"
output = program(context_list=choices_str, query_str=query_str)
Function call: Answers with args: {
"answers": [
{
"choice": 2,
"reason": "Orange peels are related to oranges"
}
]
}
output
Answers(answers=[Answer(choice=2, reason='Orange peels are related to oranges')])
4. 将路由模块作为RAG流程的组成部分¶
本节我们将把路由模块应用于RAG(检索增强生成)流程中。通过该模块,系统能动态决定执行问答任务还是摘要任务。我们可以轻松地通过向量索引实现基于top-k检索的问答查询引擎,而摘要功能则通过摘要索引完成。每个查询引擎都被定义为路由器的"选项",最终我们将整个系统整合为统一的查询引擎。
设置:加载数据¶
我们将Llama 2论文作为数据加载。
!mkdir data
!wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"
mkdir: data: File exists --2023-09-17 23:37:11-- https://arxiv.org/pdf/2307.09288.pdf Resolving arxiv.org (arxiv.org)... 128.84.21.199 Connecting to arxiv.org (arxiv.org)|128.84.21.199|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 13661300 (13M) [application/pdf] Saving to: ‘data/llama2.pdf’ data/llama2.pdf 100%[===================>] 13.03M 1.50MB/s in 9.5s 2023-09-17 23:37:22 (1.37 MB/s) - ‘data/llama2.pdf’ saved [13661300/13661300]
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
配置:定义索引¶
在此数据集上同时定义一个向量索引和摘要索引。
from llama_index.core import VectorStoreIndex
from llama_index.core import SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
# Chunk documents into ~1024-token sentences-aware nodes.
splitter = SentenceSplitter(chunk_size=1024)
# Vector index -> top-k retrieval for specific questions.
vector_index = VectorStoreIndex.from_documents(
documents, transformations=[splitter]
)
# Summary index -> iterates all nodes, suited to whole-paper summaries.
summary_index = SummaryIndex.from_documents(
documents, transformations=[splitter]
)
vector_query_engine = vector_index.as_query_engine(llm=llm)
summary_query_engine = summary_index.as_query_engine(llm=llm)
定义 RouterQueryEngine¶
我们通过子类化 CustomQueryEngine 来定义一个自定义路由器。
from llama_index.core.query_engine import CustomQueryEngine, BaseQueryEngine
from llama_index.core.response_synthesizers import TreeSummarize
class RouterQueryEngine(CustomQueryEngine):
    """Query engine that routes a query to one or more sub-engines.

    Uses an OpenAIPydanticProgram (function calling) to pick the relevant
    choice(s), queries the corresponding engine(s), and merges multiple
    responses with ``TreeSummarize``.
    """

    query_engines: List[BaseQueryEngine]  # one engine per choice, same order
    choice_descriptions: List[str]  # descriptions shown to the router LLM
    verbose: bool = False
    router_prompt: PromptTemplate  # routing prompt (context_list/query_str open)
    llm: OpenAI
    summarizer: TreeSummarize = Field(default_factory=TreeSummarize)

    def custom_query(self, query_str: str):
        """Route *query_str* and return a single (possibly merged) response."""
        program = OpenAIPydanticProgram.from_defaults(
            output_cls=Answers,
            # BUGFIX: use the configured prompt; the original accidentally
            # referenced the module-level `router_prompt1`, making the
            # `router_prompt` field dead configuration.
            prompt=self.router_prompt,
            verbose=self.verbose,
            llm=self.llm,
        )
        choices_str = get_choice_str(self.choice_descriptions)
        output = program(context_list=choices_str, query_str=query_str)
        if self.verbose:
            print("Selected choice(s):")
            for answer in output.answers:
                print(f"Choice: {answer.choice}, Reason: {answer.reason}")
        responses = []
        for answer in output.answers:
            # Answer.choice is a 1-based index into query_engines.
            engine = self.query_engines[answer.choice - 1]
            responses.append(engine.query(query_str))
        # Single selection: return that engine's response directly.
        if len(responses) == 1:
            return responses[0]
        # Multiple selections: summarize the individual responses.
        response_strs = [str(r) for r in responses]
        return self.summarizer.get_response(query_str, response_strs)
# Route descriptions: index 1 -> vector (specific questions),
# index 2 -> summary (whole-paper questions).
choices = [
(
"Useful for answering questions about specific sections of the Llama 2"
" paper"
),
"Useful for questions that ask for a summary of the whole paper",
]
# Engines must be listed in the same order as their descriptions above.
router_query_engine = RouterQueryEngine(
query_engines=[vector_query_engine, summary_query_engine],
choice_descriptions=choices,
verbose=True,
router_prompt=router_prompt1,
llm=OpenAI(model="gpt-4"),
)
试用我们构建的路由查询引擎¶
让我们来体验一下自建的路由查询引擎吧!我们提出一个问题,该问题将被路由至向量查询引擎;同时提出另一个问题,该问题将被路由至摘要生成引擎。
response = router_query_engine.query(
"How does the Llama 2 model compare to GPT-4 in the experimental results?"
)
Function call: Answers with args: {
"answers": [
{
"choice": 1,
"reason": "This question is asking for specific information about the Llama 2 model and its comparison to GPT-4 in the experimental results. Therefore, the summary that is useful for answering questions about specific sections of the paper would be most relevant."
}
]
}
Selected choice(s):
Choice: 1, Reason: This question is asking for specific information about the Llama 2 model and its comparison to GPT-4 in the experimental results. Therefore, the summary that is useful for answering questions about specific sections of the paper would be most relevant.
print(str(response))
The Llama 2 model performs better than GPT-4 in the experimental results.
response = router_query_engine.query("Can you give a summary of this paper?")
Function call: Answers with args: {
"answers": [
{
"choice": 2,
"reason": "This choice is directly related to providing a summary of the whole paper, which is what the question asks for."
}
]
}
Selected choice(s):
Choice: 2, Reason: This choice is directly related to providing a summary of the whole paper, which is what the question asks for.
print(str(response))